testing.py 102 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
7277827792780278127822783278427852786278727882789279027912792279327942795279627972798279928002801280228032804280528062807280828092810281128122813281428152816281728182819282028212822282328242825282628272828282928302831283228332834283528362837283828392840284128422843284428452846284728482849285028512852285328542855285628572858285928602861286228632864286528662867286828692870287128722873287428752876287728782879288028812882288328842885288628872888288928902891289228932894289528962897289828992900290129022903290429052906290729082909291029112912291329142915291629172918291929202921292229232924292529262927292829292930293129322933293429352936293729382939294029412942294329442945294629472948294929502951295229532954295529562957295829592960296129622963296429652966296729682969297029712972297329742975297629772978297929802981298229832984298529862987298829892990299129922993299429952996299729982999300030013002300330043005300630073008300930103011301230133014301530163017301830193020302130223023302430253026302730283029303030313032303330343035303630373038303930403041304230433044304530463047304830493050305130523053305430553056305730583059306030613062306330643065306630673068
  1. from __future__ import division
  2. from contextlib import contextmanager
  3. from datetime import datetime
  4. from functools import wraps
  5. import locale
  6. import os
  7. import re
  8. from shutil import rmtree
  9. import string
  10. import subprocess
  11. import sys
  12. import tempfile
  13. import traceback
  14. import warnings
  15. import numpy as np
  16. from numpy.random import rand, randn
  17. from pandas._libs import testing as _testing
  18. import pandas.compat as compat
  19. from pandas.compat import (
  20. PY2, PY3, Counter, callable, filter, httplib, lmap, lrange, lzip, map,
  21. raise_with_traceback, range, string_types, u, unichr, zip)
  22. from pandas.core.dtypes.common import (
  23. is_bool, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
  24. is_datetimelike_v_numeric, is_datetimelike_v_object,
  25. is_extension_array_dtype, is_interval_dtype, is_list_like, is_number,
  26. is_period_dtype, is_sequence, is_timedelta64_dtype, needs_i8_conversion)
  27. from pandas.core.dtypes.missing import array_equivalent
  28. import pandas as pd
  29. from pandas import (
  30. Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index,
  31. IntervalIndex, MultiIndex, Panel, RangeIndex, Series, bdate_range)
  32. from pandas.core.algorithms import take_1d
  33. from pandas.core.arrays import (
  34. DatetimeArray, ExtensionArray, IntervalArray, PeriodArray, TimedeltaArray,
  35. period_array)
  36. import pandas.core.common as com
  37. from pandas.io.common import urlopen
  38. from pandas.io.formats.printing import pprint_thing
# Default dimensions for randomly generated test frames/series.
N = 30
K = 4

# Default for network-error decorators: swallow network errors, don't raise.
_RAISE_NETWORK_ERROR_DEFAULT = False

# set testing_mode
# Warning categories toggled on/off by the PANDAS_TESTING_MODE env variable.
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
  44. def set_testing_mode():
  45. # set the testing mode filters
  46. testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
  47. if 'deprecate' in testing_mode:
  48. warnings.simplefilter('always', _testing_mode_warnings)
  49. def reset_testing_mode():
  50. # reset the testing mode filters
  51. testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
  52. if 'deprecate' in testing_mode:
  53. warnings.simplefilter('ignore', _testing_mode_warnings)
# Install the PANDAS_TESTING_MODE warning filters once, at import time.
set_testing_mode()
  55. def reset_display_options():
  56. """
  57. Reset the display options for printing and representing objects.
  58. """
  59. pd.reset_option('^display.', silent=True)
  60. def round_trip_pickle(obj, path=None):
  61. """
  62. Pickle an object and then read it again.
  63. Parameters
  64. ----------
  65. obj : pandas object
  66. The object to pickle and then re-read.
  67. path : str, default None
  68. The path where the pickled object is written and then read.
  69. Returns
  70. -------
  71. round_trip_pickled_object : pandas object
  72. The original object that was pickled and then re-read.
  73. """
  74. if path is None:
  75. path = u('__{random_bytes}__.pickle'.format(random_bytes=rands(10)))
  76. with ensure_clean(path) as path:
  77. pd.to_pickle(obj, path)
  78. return pd.read_pickle(path)
  79. def round_trip_pathlib(writer, reader, path=None):
  80. """
  81. Write an object to file specified by a pathlib.Path and read it back
  82. Parameters
  83. ----------
  84. writer : callable bound to pandas object
  85. IO writing function (e.g. DataFrame.to_csv )
  86. reader : callable
  87. IO reading function (e.g. pd.read_csv )
  88. path : str, default None
  89. The path where the object is written and then read.
  90. Returns
  91. -------
  92. round_trip_object : pandas object
  93. The original object that was serialized and then re-read.
  94. """
  95. import pytest
  96. Path = pytest.importorskip('pathlib').Path
  97. if path is None:
  98. path = '___pathlib___'
  99. with ensure_clean(path) as path:
  100. writer(Path(path))
  101. obj = reader(Path(path))
  102. return obj
  103. def round_trip_localpath(writer, reader, path=None):
  104. """
  105. Write an object to file specified by a py.path LocalPath and read it back
  106. Parameters
  107. ----------
  108. writer : callable bound to pandas object
  109. IO writing function (e.g. DataFrame.to_csv )
  110. reader : callable
  111. IO reading function (e.g. pd.read_csv )
  112. path : str, default None
  113. The path where the object is written and then read.
  114. Returns
  115. -------
  116. round_trip_object : pandas object
  117. The original object that was serialized and then re-read.
  118. """
  119. import pytest
  120. LocalPath = pytest.importorskip('py.path').local
  121. if path is None:
  122. path = '___localpath___'
  123. with ensure_clean(path) as path:
  124. writer(LocalPath(path))
  125. obj = reader(LocalPath(path))
  126. return obj
  127. @contextmanager
  128. def decompress_file(path, compression):
  129. """
  130. Open a compressed file and return a file object
  131. Parameters
  132. ----------
  133. path : str
  134. The path where the file is read from
  135. compression : {'gzip', 'bz2', 'zip', 'xz', None}
  136. Name of the decompression to use
  137. Returns
  138. -------
  139. f : file object
  140. """
  141. if compression is None:
  142. f = open(path, 'rb')
  143. elif compression == 'gzip':
  144. import gzip
  145. f = gzip.open(path, 'rb')
  146. elif compression == 'bz2':
  147. import bz2
  148. f = bz2.BZ2File(path, 'rb')
  149. elif compression == 'xz':
  150. lzma = compat.import_lzma()
  151. f = lzma.LZMAFile(path, 'rb')
  152. elif compression == 'zip':
  153. import zipfile
  154. zip_file = zipfile.ZipFile(path)
  155. zip_names = zip_file.namelist()
  156. if len(zip_names) == 1:
  157. f = zip_file.open(zip_names.pop())
  158. else:
  159. raise ValueError('ZIP file {} error. Only one file per ZIP.'
  160. .format(path))
  161. else:
  162. msg = 'Unrecognized compression type: {}'.format(compression)
  163. raise ValueError(msg)
  164. try:
  165. yield f
  166. finally:
  167. f.close()
  168. if compression == "zip":
  169. zip_file.close()
  170. def write_to_compressed(compression, path, data, dest="test"):
  171. """
  172. Write data to a compressed file.
  173. Parameters
  174. ----------
  175. compression : {'gzip', 'bz2', 'zip', 'xz'}
  176. The compression type to use.
  177. path : str
  178. The file path to write the data.
  179. data : str
  180. The data to write.
  181. dest : str, default "test"
  182. The destination file (for ZIP only)
  183. Raises
  184. ------
  185. ValueError : An invalid compression value was passed in.
  186. """
  187. if compression == "zip":
  188. import zipfile
  189. compress_method = zipfile.ZipFile
  190. elif compression == "gzip":
  191. import gzip
  192. compress_method = gzip.GzipFile
  193. elif compression == "bz2":
  194. import bz2
  195. compress_method = bz2.BZ2File
  196. elif compression == "xz":
  197. lzma = compat.import_lzma()
  198. compress_method = lzma.LZMAFile
  199. else:
  200. msg = "Unrecognized compression type: {}".format(compression)
  201. raise ValueError(msg)
  202. if compression == "zip":
  203. mode = "w"
  204. args = (dest, data)
  205. method = "writestr"
  206. else:
  207. mode = "wb"
  208. args = (data,)
  209. method = "write"
  210. with compress_method(path, mode=mode) as f:
  211. getattr(f, method)(*args)
  212. def assert_almost_equal(left, right, check_dtype="equiv",
  213. check_less_precise=False, **kwargs):
  214. """
  215. Check that the left and right objects are approximately equal.
  216. By approximately equal, we refer to objects that are numbers or that
  217. contain numbers which may be equivalent to specific levels of precision.
  218. Parameters
  219. ----------
  220. left : object
  221. right : object
  222. check_dtype : bool / string {'equiv'}, default 'equiv'
  223. Check dtype if both a and b are the same type. If 'equiv' is passed in,
  224. then `RangeIndex` and `Int64Index` are also considered equivalent
  225. when doing type checking.
  226. check_less_precise : bool or int, default False
  227. Specify comparison precision. 5 digits (False) or 3 digits (True)
  228. after decimal points are compared. If int, then specify the number
  229. of digits to compare.
  230. When comparing two numbers, if the first number has magnitude less
  231. than 1e-5, we compare the two numbers directly and check whether
  232. they are equivalent within the specified precision. Otherwise, we
  233. compare the **ratio** of the second number to the first number and
  234. check whether it is equivalent to 1 within the specified precision.
  235. """
  236. if isinstance(left, pd.Index):
  237. return assert_index_equal(left, right,
  238. check_exact=False,
  239. exact=check_dtype,
  240. check_less_precise=check_less_precise,
  241. **kwargs)
  242. elif isinstance(left, pd.Series):
  243. return assert_series_equal(left, right,
  244. check_exact=False,
  245. check_dtype=check_dtype,
  246. check_less_precise=check_less_precise,
  247. **kwargs)
  248. elif isinstance(left, pd.DataFrame):
  249. return assert_frame_equal(left, right,
  250. check_exact=False,
  251. check_dtype=check_dtype,
  252. check_less_precise=check_less_precise,
  253. **kwargs)
  254. else:
  255. # Other sequences.
  256. if check_dtype:
  257. if is_number(left) and is_number(right):
  258. # Do not compare numeric classes, like np.float64 and float.
  259. pass
  260. elif is_bool(left) and is_bool(right):
  261. # Do not compare bool classes, like np.bool_ and bool.
  262. pass
  263. else:
  264. if (isinstance(left, np.ndarray) or
  265. isinstance(right, np.ndarray)):
  266. obj = "numpy array"
  267. else:
  268. obj = "Input"
  269. assert_class_equal(left, right, obj=obj)
  270. return _testing.assert_almost_equal(
  271. left, right,
  272. check_dtype=check_dtype,
  273. check_less_precise=check_less_precise,
  274. **kwargs)
  275. def _check_isinstance(left, right, cls):
  276. """
  277. Helper method for our assert_* methods that ensures that
  278. the two objects being compared have the right type before
  279. proceeding with the comparison.
  280. Parameters
  281. ----------
  282. left : The first object being compared.
  283. right : The second object being compared.
  284. cls : The class type to check against.
  285. Raises
  286. ------
  287. AssertionError : Either `left` or `right` is not an instance of `cls`.
  288. """
  289. err_msg = "{name} Expected type {exp_type}, found {act_type} instead"
  290. cls_name = cls.__name__
  291. if not isinstance(left, cls):
  292. raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
  293. act_type=type(left)))
  294. if not isinstance(right, cls):
  295. raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
  296. act_type=type(right)))
  297. def assert_dict_equal(left, right, compare_keys=True):
  298. _check_isinstance(left, right, dict)
  299. return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
  300. def randbool(size=(), p=0.5):
  301. return rand(*size) <= p
# Character pool for `rands`/`rands_array`: ASCII letters + digits,
# one character per array element.
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
                       dtype=(np.str_, 1))
# Character pool for `randu`/`randu_array`: 26 unicode characters starting
# at code point 1488, plus ASCII digits.
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
                            string.digits), dtype=(np.unicode_, 1))
  306. def rands_array(nchars, size, dtype='O'):
  307. """Generate an array of byte strings."""
  308. retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
  309. .view((np.str_, nchars)).reshape(size))
  310. if dtype is None:
  311. return retval
  312. else:
  313. return retval.astype(dtype)
  314. def randu_array(nchars, size, dtype='O'):
  315. """Generate an array of unicode strings."""
  316. retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
  317. .view((np.unicode_, nchars)).reshape(size))
  318. if dtype is None:
  319. return retval
  320. else:
  321. return retval.astype(dtype)
  322. def rands(nchars):
  323. """
  324. Generate one random byte string.
  325. See `rands_array` if you want to create an array of random strings.
  326. """
  327. return ''.join(np.random.choice(RANDS_CHARS, nchars))
  328. def randu(nchars):
  329. """
  330. Generate one random unicode string.
  331. See `randu_array` if you want to create an array of random unicode strings.
  332. """
  333. return ''.join(np.random.choice(RANDU_CHARS, nchars))
  334. def close(fignum=None):
  335. from matplotlib.pyplot import get_fignums, close as _close
  336. if fignum is None:
  337. for fignum in get_fignums():
  338. _close(fignum)
  339. else:
  340. _close(fignum)
  341. # -----------------------------------------------------------------------------
  342. # locale utilities
  343. def check_output(*popenargs, **kwargs):
  344. # shamelessly taken from Python 2.7 source
  345. r"""Run command with arguments and return its output as a byte string.
  346. If the exit code was non-zero it raises a CalledProcessError. The
  347. CalledProcessError object will have the return code in the returncode
  348. attribute and output in the output attribute.
  349. The arguments are the same as for the Popen constructor. Example:
  350. >>> check_output(["ls", "-l", "/dev/null"])
  351. 'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
  352. The stdout argument is not allowed as it is used internally.
  353. To capture standard error in the result, use stderr=STDOUT.
  354. >>> check_output(["/bin/sh", "-c",
  355. ... "ls -l non_existent_file ; exit 0"],
  356. ... stderr=STDOUT)
  357. 'ls: non_existent_file: No such file or directory\n'
  358. """
  359. if 'stdout' in kwargs:
  360. raise ValueError('stdout argument not allowed, it will be overridden.')
  361. process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
  362. *popenargs, **kwargs)
  363. output, unused_err = process.communicate()
  364. retcode = process.poll()
  365. if retcode:
  366. cmd = kwargs.get("args")
  367. if cmd is None:
  368. cmd = popenargs[0]
  369. raise subprocess.CalledProcessError(retcode, cmd, output=output)
  370. return output
  371. def _default_locale_getter():
  372. try:
  373. raw_locales = check_output(['locale -a'], shell=True)
  374. except subprocess.CalledProcessError as e:
  375. raise type(e)("{exception}, the 'locale -a' command cannot be found "
  376. "on your system".format(exception=e))
  377. return raw_locales
  378. def get_locales(prefix=None, normalize=True,
  379. locale_getter=_default_locale_getter):
  380. """Get all the locales that are available on the system.
  381. Parameters
  382. ----------
  383. prefix : str
  384. If not ``None`` then return only those locales with the prefix
  385. provided. For example to get all English language locales (those that
  386. start with ``"en"``), pass ``prefix="en"``.
  387. normalize : bool
  388. Call ``locale.normalize`` on the resulting list of available locales.
  389. If ``True``, only locales that can be set without throwing an
  390. ``Exception`` are returned.
  391. locale_getter : callable
  392. The function to use to retrieve the current locales. This should return
  393. a string with each locale separated by a newline character.
  394. Returns
  395. -------
  396. locales : list of strings
  397. A list of locale strings that can be set with ``locale.setlocale()``.
  398. For example::
  399. locale.setlocale(locale.LC_ALL, locale_string)
  400. On error will return None (no locale available, e.g. Windows)
  401. """
  402. try:
  403. raw_locales = locale_getter()
  404. except Exception:
  405. return None
  406. try:
  407. # raw_locales is "\n" separated list of locales
  408. # it may contain non-decodable parts, so split
  409. # extract what we can and then rejoin.
  410. raw_locales = raw_locales.split(b'\n')
  411. out_locales = []
  412. for x in raw_locales:
  413. if PY3:
  414. out_locales.append(str(
  415. x, encoding=pd.options.display.encoding))
  416. else:
  417. out_locales.append(str(x))
  418. except TypeError:
  419. pass
  420. if prefix is None:
  421. return _valid_locales(out_locales, normalize)
  422. pattern = re.compile('{prefix}.*'.format(prefix=prefix))
  423. found = pattern.findall('\n'.join(out_locales))
  424. return _valid_locales(found, normalize)
  425. @contextmanager
  426. def set_locale(new_locale, lc_var=locale.LC_ALL):
  427. """Context manager for temporarily setting a locale.
  428. Parameters
  429. ----------
  430. new_locale : str or tuple
  431. A string of the form <language_country>.<encoding>. For example to set
  432. the current locale to US English with a UTF8 encoding, you would pass
  433. "en_US.UTF-8".
  434. lc_var : int, default `locale.LC_ALL`
  435. The category of the locale being set.
  436. Notes
  437. -----
  438. This is useful when you want to run a particular block of code under a
  439. particular locale, without globally setting the locale. This probably isn't
  440. thread-safe.
  441. """
  442. current_locale = locale.getlocale()
  443. try:
  444. locale.setlocale(lc_var, new_locale)
  445. normalized_locale = locale.getlocale()
  446. if com._all_not_none(*normalized_locale):
  447. yield '.'.join(normalized_locale)
  448. else:
  449. yield new_locale
  450. finally:
  451. locale.setlocale(lc_var, current_locale)
  452. def can_set_locale(lc, lc_var=locale.LC_ALL):
  453. """
  454. Check to see if we can set a locale, and subsequently get the locale,
  455. without raising an Exception.
  456. Parameters
  457. ----------
  458. lc : str
  459. The locale to attempt to set.
  460. lc_var : int, default `locale.LC_ALL`
  461. The category of the locale being set.
  462. Returns
  463. -------
  464. is_valid : bool
  465. Whether the passed locale can be set
  466. """
  467. try:
  468. with set_locale(lc, lc_var=lc_var):
  469. pass
  470. except (ValueError,
  471. locale.Error): # horrible name for a Exception subclass
  472. return False
  473. else:
  474. return True
  475. def _valid_locales(locales, normalize):
  476. """Return a list of normalized locales that do not throw an ``Exception``
  477. when set.
  478. Parameters
  479. ----------
  480. locales : str
  481. A string where each locale is separated by a newline.
  482. normalize : bool
  483. Whether to call ``locale.normalize`` on each locale.
  484. Returns
  485. -------
  486. valid_locales : list
  487. A list of valid locales.
  488. """
  489. if normalize:
  490. normalizer = lambda x: locale.normalize(x.strip())
  491. else:
  492. normalizer = lambda x: x.strip()
  493. return list(filter(can_set_locale, map(normalizer, locales)))
  494. # -----------------------------------------------------------------------------
  495. # Stdout / stderr decorators
  496. @contextmanager
  497. def set_defaultencoding(encoding):
  498. """
  499. Set default encoding (as given by sys.getdefaultencoding()) to the given
  500. encoding; restore on exit.
  501. Parameters
  502. ----------
  503. encoding : str
  504. """
  505. if not PY2:
  506. raise ValueError("set_defaultencoding context is only available "
  507. "in Python 2.")
  508. orig = sys.getdefaultencoding()
  509. reload(sys) # noqa:F821
  510. sys.setdefaultencoding(encoding)
  511. try:
  512. yield
  513. finally:
  514. sys.setdefaultencoding(orig)
  515. # -----------------------------------------------------------------------------
  516. # Console debugging tools
  517. def debug(f, *args, **kwargs):
  518. from pdb import Pdb as OldPdb
  519. try:
  520. from IPython.core.debugger import Pdb
  521. kw = dict(color_scheme='Linux')
  522. except ImportError:
  523. Pdb = OldPdb
  524. kw = {}
  525. pdb = Pdb(**kw)
  526. return pdb.runcall(f, *args, **kwargs)
  527. def pudebug(f, *args, **kwargs):
  528. import pudb
  529. return pudb.runcall(f, *args, **kwargs)
  530. def set_trace():
  531. from IPython.core.debugger import Pdb
  532. try:
  533. Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
  534. except Exception:
  535. from pdb import Pdb as OldPdb
  536. OldPdb().set_trace(sys._getframe().f_back)
  537. # -----------------------------------------------------------------------------
  538. # contextmanager to ensure the file cleanup
  539. @contextmanager
  540. def ensure_clean(filename=None, return_filelike=False):
  541. """Gets a temporary path and agrees to remove on close.
  542. Parameters
  543. ----------
  544. filename : str (optional)
  545. if None, creates a temporary file which is then removed when out of
  546. scope. if passed, creates temporary file with filename as ending.
  547. return_filelike : bool (default False)
  548. if True, returns a file-like which is *always* cleaned. Necessary for
  549. savefig and other functions which want to append extensions.
  550. """
  551. filename = filename or ''
  552. fd = None
  553. if return_filelike:
  554. f = tempfile.TemporaryFile(suffix=filename)
  555. try:
  556. yield f
  557. finally:
  558. f.close()
  559. else:
  560. # don't generate tempfile if using a path with directory specified
  561. if len(os.path.dirname(filename)):
  562. raise ValueError("Can't pass a qualified name to ensure_clean()")
  563. try:
  564. fd, filename = tempfile.mkstemp(suffix=filename)
  565. except UnicodeEncodeError:
  566. import pytest
  567. pytest.skip('no unicode file names on this system')
  568. try:
  569. yield filename
  570. finally:
  571. try:
  572. os.close(fd)
  573. except Exception:
  574. print("Couldn't close file descriptor: {fdesc} (file: {fname})"
  575. .format(fdesc=fd, fname=filename))
  576. try:
  577. if os.path.exists(filename):
  578. os.remove(filename)
  579. except Exception as e:
  580. print("Exception on removing file: {error}".format(error=e))
  581. @contextmanager
  582. def ensure_clean_dir():
  583. """
  584. Get a temporary directory path and agrees to remove on close.
  585. Yields
  586. ------
  587. Temporary directory path
  588. """
  589. directory_name = tempfile.mkdtemp(suffix='')
  590. try:
  591. yield directory_name
  592. finally:
  593. try:
  594. rmtree(directory_name)
  595. except Exception:
  596. pass
  597. @contextmanager
  598. def ensure_safe_environment_variables():
  599. """
  600. Get a context manager to safely set environment variables
  601. All changes will be undone on close, hence environment variables set
  602. within this contextmanager will neither persist nor change global state.
  603. """
  604. saved_environ = dict(os.environ)
  605. try:
  606. yield
  607. finally:
  608. os.environ.clear()
  609. os.environ.update(saved_environ)
  610. # -----------------------------------------------------------------------------
  611. # Comparators
  612. def equalContents(arr1, arr2):
  613. """Checks if the set of unique elements of arr1 and arr2 are equivalent.
  614. """
  615. return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(left, right, exact='equiv', check_names=True,
                       check_less_precise=False, check_exact=True,
                       check_categorical=True, obj='Index'):
    """Check that left and right Index are equal.

    Parameters
    ----------
    left : Index
    right : Index
    exact : bool / string {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical. If 'equiv', then RangeIndex can be substituted for
        Int64Index as well.
    check_names : bool, default True
        Whether to check the names attribute.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare
    check_exact : bool, default True
        Whether to compare number exactly.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    obj : str, default 'Index'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    __tracebackhide__ = True

    def _check_types(l, r, obj='Index'):
        # class / dtype / inferred_type comparison; no-op when exact is falsy
        if exact:
            assert_class_equal(l, r, exact=exact, obj=obj)

            # Skip exact dtype checking when `check_categorical` is False
            if check_categorical:
                assert_attr_equal('dtype', l, r, obj=obj)

            # allow string-like to have different inferred_types
            if l.inferred_type in ('string', 'unicode'):
                assert r.inferred_type in ('string', 'unicode')
            else:
                assert_attr_equal('inferred_type', l, r, obj=obj)

    def _get_ilevel_values(index, level):
        # accept level number only
        # Expand the level's codes back into a full-length flat Index,
        # filling -1 codes with the level's NA value.
        unique = index.levels[level]
        labels = index.codes[level]
        filled = take_1d(unique.values, labels, fill_value=unique._na_value)
        values = unique._shallow_copy(filled, name=index.names[level])
        return values

    # instance validation
    _check_isinstance(left, right, Index)

    # class / dtype comparison
    _check_types(left, right, obj=obj)

    # level comparison
    if left.nlevels != right.nlevels:
        msg1 = '{obj} levels are different'.format(obj=obj)
        msg2 = '{nlevels}, {left}'.format(nlevels=left.nlevels, left=left)
        msg3 = '{nlevels}, {right}'.format(nlevels=right.nlevels, right=right)
        raise_assert_detail(obj, msg1, msg2, msg3)

    # length comparison
    if len(left) != len(right):
        msg1 = '{obj} length are different'.format(obj=obj)
        msg2 = '{length}, {left}'.format(length=len(left), left=left)
        msg3 = '{length}, {right}'.format(length=len(right), right=right)
        raise_assert_detail(obj, msg1, msg2, msg3)

    # MultiIndex special comparison for little-friendly error messages
    if left.nlevels > 1:
        for level in range(left.nlevels):
            # cannot use get_level_values here because it can change dtype
            llevel = _get_ilevel_values(left, level)
            rlevel = _get_ilevel_values(right, level)

            lobj = 'MultiIndex level [{level}]'.format(level=level)
            # NOTE(review): `check_categorical` is not forwarded to this
            # recursive call — confirm whether that is intentional.
            assert_index_equal(llevel, rlevel,
                               exact=exact, check_names=check_names,
                               check_less_precise=check_less_precise,
                               check_exact=check_exact, obj=lobj)
            # get_level_values may change dtype
            _check_types(left.levels[level], right.levels[level], obj=obj)

    # skip exact index checking when `check_categorical` is False
    if check_exact and check_categorical:
        if not left.equals(right):
            # report the percentage of differing positions
            diff = np.sum((left.values != right.values)
                          .astype(int)) * 100.0 / len(left)
            msg = '{obj} values are different ({pct} %)'.format(
                obj=obj, pct=np.round(diff, 5))
            raise_assert_detail(obj, msg, left, right)
    else:
        _testing.assert_almost_equal(left.values, right.values,
                                     check_less_precise=check_less_precise,
                                     check_dtype=exact,
                                     obj=obj, lobj=left, robj=right)

    # metadata comparison
    if check_names:
        assert_attr_equal('names', left, right, obj=obj)
    if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
        assert_attr_equal('freq', left, right, obj=obj)
    if (isinstance(left, pd.IntervalIndex) or
            isinstance(right, pd.IntervalIndex)):
        assert_interval_array_equal(left.values, right.values)

    if check_categorical:
        if is_categorical_dtype(left) or is_categorical_dtype(right):
            assert_categorical_equal(left.values, right.values,
                                     obj='{obj} category'.format(obj=obj))
  715. def assert_class_equal(left, right, exact=True, obj='Input'):
  716. """checks classes are equal."""
  717. __tracebackhide__ = True
  718. def repr_class(x):
  719. if isinstance(x, Index):
  720. # return Index as it is to include values in the error message
  721. return x
  722. try:
  723. return x.__class__.__name__
  724. except AttributeError:
  725. return repr(type(x))
  726. if exact == 'equiv':
  727. if type(left) != type(right):
  728. # allow equivalence of Int64Index/RangeIndex
  729. types = {type(left).__name__, type(right).__name__}
  730. if len(types - {'Int64Index', 'RangeIndex'}):
  731. msg = '{obj} classes are not equivalent'.format(obj=obj)
  732. raise_assert_detail(obj, msg, repr_class(left),
  733. repr_class(right))
  734. elif exact:
  735. if type(left) != type(right):
  736. msg = '{obj} classes are different'.format(obj=obj)
  737. raise_assert_detail(obj, msg, repr_class(left),
  738. repr_class(right))
  739. def assert_attr_equal(attr, left, right, obj='Attributes'):
  740. """checks attributes are equal. Both objects must have attribute.
  741. Parameters
  742. ----------
  743. attr : str
  744. Attribute name being compared.
  745. left : object
  746. right : object
  747. obj : str, default 'Attributes'
  748. Specify object name being compared, internally used to show appropriate
  749. assertion message
  750. """
  751. __tracebackhide__ = True
  752. left_attr = getattr(left, attr)
  753. right_attr = getattr(right, attr)
  754. if left_attr is right_attr:
  755. return True
  756. elif (is_number(left_attr) and np.isnan(left_attr) and
  757. is_number(right_attr) and np.isnan(right_attr)):
  758. # np.nan
  759. return True
  760. try:
  761. result = left_attr == right_attr
  762. except TypeError:
  763. # datetimetz on rhs may raise TypeError
  764. result = False
  765. if not isinstance(result, bool):
  766. result = result.all()
  767. if result:
  768. return True
  769. else:
  770. msg = 'Attribute "{attr}" are different'.format(attr=attr)
  771. raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
    """Assert that *objs* is an acceptable return value from a pandas
    plotting call: a Series/ndarray whose elements are all matplotlib
    Axes (or dicts), or a single Artist, tuple, or dict.
    """
    import matplotlib.pyplot as plt
    if isinstance(objs, (pd.Series, np.ndarray)):
        # flatten and check every element individually
        for el in objs.ravel():
            msg = ("one of 'objs' is not a matplotlib Axes instance, type "
                   "encountered {name!r}").format(name=el.__class__.__name__)
            assert isinstance(el, (plt.Axes, dict)), msg
    else:
        assert isinstance(objs, (plt.Artist, tuple, dict)), (
            'objs is neither an ndarray of Artist instances nor a '
            'single Artist instance, tuple, or dict, "objs" is a {name!r}'
            .format(name=objs.__class__.__name__))
  784. def isiterable(obj):
  785. return hasattr(obj, '__iter__')
  786. def is_sorted(seq):
  787. if isinstance(seq, (Index, Series)):
  788. seq = seq.values
  789. # sorting does not change precisions
  790. return assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(left, right, check_dtype=True,
                             check_category_order=True, obj='Categorical'):
    """Test that Categoricals are equivalent.

    Parameters
    ----------
    left : Categorical
    right : Categorical
    check_dtype : bool, default True
        Check that integer dtype of the codes are the same
    check_category_order : bool, default True
        Whether the order of the categories should be compared, which
        implies identical integer codes.  If False, only the resulting
        values are compared.  The ordered attribute is
        checked regardless.
    obj : str, default 'Categorical'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    _check_isinstance(left, right, Categorical)

    if check_category_order:
        # strict: categories and codes must match exactly
        assert_index_equal(left.categories, right.categories,
                           obj='{obj}.categories'.format(obj=obj))
        assert_numpy_array_equal(left.codes, right.codes,
                                 check_dtype=check_dtype,
                                 obj='{obj}.codes'.format(obj=obj))
    else:
        # order-insensitive: compare sorted categories and the decoded values
        assert_index_equal(left.categories.sort_values(),
                           right.categories.sort_values(),
                           obj='{obj}.categories'.format(obj=obj))
        # NOTE(review): take() maps -1 (NaN) codes to the *last* category,
        # so missing values compare as that category here — confirm intent.
        assert_index_equal(left.categories.take(left.codes),
                           right.categories.take(right.codes),
                           obj='{obj}.values'.format(obj=obj))

    assert_attr_equal('ordered', left, right, obj=obj)
  824. def assert_interval_array_equal(left, right, exact='equiv',
  825. obj='IntervalArray'):
  826. """Test that two IntervalArrays are equivalent.
  827. Parameters
  828. ----------
  829. left, right : IntervalArray
  830. The IntervalArrays to compare.
  831. exact : bool / string {'equiv'}, default 'equiv'
  832. Whether to check the Index class, dtype and inferred_type
  833. are identical. If 'equiv', then RangeIndex can be substituted for
  834. Int64Index as well.
  835. obj : str, default 'IntervalArray'
  836. Specify object name being compared, internally used to show appropriate
  837. assertion message
  838. """
  839. _check_isinstance(left, right, IntervalArray)
  840. assert_index_equal(left.left, right.left, exact=exact,
  841. obj='{obj}.left'.format(obj=obj))
  842. assert_index_equal(left.right, right.right, exact=exact,
  843. obj='{obj}.left'.format(obj=obj))
  844. assert_attr_equal('closed', left, right, obj=obj)
  845. def assert_period_array_equal(left, right, obj='PeriodArray'):
  846. _check_isinstance(left, right, PeriodArray)
  847. assert_numpy_array_equal(left._data, right._data,
  848. obj='{obj}.values'.format(obj=obj))
  849. assert_attr_equal('freq', left, right, obj=obj)
  850. def assert_datetime_array_equal(left, right, obj='DatetimeArray'):
  851. __tracebackhide__ = True
  852. _check_isinstance(left, right, DatetimeArray)
  853. assert_numpy_array_equal(left._data, right._data,
  854. obj='{obj}._data'.format(obj=obj))
  855. assert_attr_equal('freq', left, right, obj=obj)
  856. assert_attr_equal('tz', left, right, obj=obj)
  857. def assert_timedelta_array_equal(left, right, obj='TimedeltaArray'):
  858. __tracebackhide__ = True
  859. _check_isinstance(left, right, TimedeltaArray)
  860. assert_numpy_array_equal(left._data, right._data,
  861. obj='{obj}._data'.format(obj=obj))
  862. assert_attr_equal('freq', left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
    """Raise an AssertionError whose message renders *left* and *right*
    (and optionally *diff*) for the object named *obj*.
    """
    __tracebackhide__ = True

    # normalize left to a printable representation
    if isinstance(left, np.ndarray):
        left = pprint_thing(left)
    elif is_categorical_dtype(left):
        left = repr(left)

    if PY2 and isinstance(left, string_types):
        # left needs to be printable in native text type in python2
        left = left.encode('utf-8')

    # normalize right the same way
    if isinstance(right, np.ndarray):
        right = pprint_thing(right)
    elif is_categorical_dtype(right):
        right = repr(right)

    if PY2 and isinstance(right, string_types):
        # right needs to be printable in native text type in python2
        right = right.encode('utf-8')

    msg = """{obj} are different
{message}
[left]: {left}
[right]: {right}""".format(obj=obj, message=message, left=left, right=right)

    if diff is not None:
        msg += "\n[diff]: {diff}".format(diff=diff)

    raise AssertionError(msg)
def assert_numpy_array_equal(left, right, strict_nan=False,
                             check_dtype=True, err_msg=None,
                             check_same=None, obj='numpy array'):
    """ Checks that 'np.ndarray' is equivalent

    Parameters
    ----------
    left : np.ndarray or iterable
    right : np.ndarray or iterable
    strict_nan : bool, default False
        If True, consider NaN and None to be different.
    check_dtype: bool, default True
        check dtype if both a and b are np.ndarray
    err_msg : str, default None
        If provided, used as assertion message
    check_same : None|'copy'|'same', default None
        Ensure left and right refer/do not refer to the same memory area
    obj : str, default 'numpy array'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    __tracebackhide__ = True

    # instance validation
    # Show a detailed error message when classes are different
    assert_class_equal(left, right, obj=obj)

    # both classes must be an np.ndarray
    _check_isinstance(left, right, np.ndarray)

    def _get_base(obj):
        # follow .base to the array that owns the memory (views set .base)
        return obj.base if getattr(obj, 'base', None) is not None else obj

    left_base = _get_base(left)
    right_base = _get_base(right)

    if check_same == 'same':
        # must share the same underlying memory
        if left_base is not right_base:
            msg = "{left!r} is not {right!r}".format(
                left=left_base, right=right_base)
            raise AssertionError(msg)
    elif check_same == 'copy':
        # must NOT share the same underlying memory
        if left_base is right_base:
            msg = "{left!r} is {right!r}".format(
                left=left_base, right=right_base)
            raise AssertionError(msg)

    def _raise(left, right, err_msg):
        # build and raise a detailed failure message (shape mismatch first,
        # otherwise the percentage of differing positions)
        if err_msg is None:
            if left.shape != right.shape:
                raise_assert_detail(obj, '{obj} shapes are different'
                                    .format(obj=obj), left.shape, right.shape)

            diff = 0
            for l, r in zip(left, right):
                # count up differences
                if not array_equivalent(l, r, strict_nan=strict_nan):
                    diff += 1

            diff = diff * 100.0 / left.size
            msg = '{obj} values are different ({pct} %)'.format(
                obj=obj, pct=np.round(diff, 5))
            raise_assert_detail(obj, msg, left, right)

        raise AssertionError(err_msg)

    # compare shape and values
    if not array_equivalent(left, right, strict_nan=strict_nan):
        _raise(left, right, err_msg)

    if check_dtype:
        if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
            assert_attr_equal('dtype', left, right, obj=obj)

    return True
def assert_extension_array_equal(left, right, check_dtype=True,
                                 check_less_precise=False,
                                 check_exact=False):
    """Check that left and right ExtensionArrays are equal.

    Parameters
    ----------
    left, right : ExtensionArray
        The two arrays to compare
    check_dtype : bool, default True
        Whether to check if the ExtensionArray dtypes are identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
    check_exact : bool, default False
        Whether to compare number exactly.

    Notes
    -----
    Missing values are checked separately from valid values.
    A mask of missing values is computed for each and checked to match.
    The remaining all-valid values are cast to object dtype and checked.
    """
    assert isinstance(left, ExtensionArray), 'left is not an ExtensionArray'
    assert isinstance(right, ExtensionArray), 'right is not an ExtensionArray'
    if check_dtype:
        assert_attr_equal('dtype', left, right, obj='ExtensionArray')

    if hasattr(left, "asi8") and type(right) == type(left):
        # Avoid slow object-dtype comparisons
        # (asi8 is presumably an int64 view of datetimelike data — this
        # compares the underlying integers directly)
        assert_numpy_array_equal(left.asi8, right.asi8)
        return

    # NA masks must agree before the valid values are compared
    left_na = np.asarray(left.isna())
    right_na = np.asarray(right.isna())
    assert_numpy_array_equal(left_na, right_na, obj='ExtensionArray NA mask')

    # compare only the non-missing positions, cast to object dtype
    left_valid = np.asarray(left[~left_na].astype(object))
    right_valid = np.asarray(right[~right_na].astype(object))
    if check_exact:
        assert_numpy_array_equal(left_valid, right_valid, obj='ExtensionArray')
    else:
        _testing.assert_almost_equal(left_valid, right_valid,
                                     check_dtype=check_dtype,
                                     check_less_precise=check_less_precise,
                                     obj='ExtensionArray')
  990. # This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
                        check_index_type='equiv',
                        check_series_type=True,
                        check_less_precise=False,
                        check_names=True,
                        check_exact=False,
                        check_datetimelike_compat=False,
                        check_categorical=True,
                        obj='Series'):
    """Check that left and right Series are equal.

    Parameters
    ----------
    left : Series
    right : Series
    check_dtype : bool, default True
        Whether to check the Series dtype is identical.
    check_index_type : bool / string {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_series_type : bool, default True
        Whether to check the Series class is identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
    check_names : bool, default True
        Whether to check the Series and Index names attribute.
    check_exact : bool, default False
        Whether to compare number exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like which is comparable ignoring dtype.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    obj : str, default 'Series'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    """
    __tracebackhide__ = True

    # instance validation
    _check_isinstance(left, right, Series)

    if check_series_type:
        # ToDo: There are some tests using rhs is sparse
        # lhs is dense. Should use assert_class_equal in future
        assert isinstance(left, type(right))
        # assert_class_equal(left, right, obj=obj)

    # length comparison
    if len(left) != len(right):
        msg1 = '{len}, {left}'.format(len=len(left), left=left.index)
        msg2 = '{len}, {right}'.format(len=len(right), right=right.index)
        raise_assert_detail(obj, 'Series length are different', msg1, msg2)

    # index comparison
    assert_index_equal(left.index, right.index, exact=check_index_type,
                       check_names=check_names,
                       check_less_precise=check_less_precise,
                       check_exact=check_exact,
                       check_categorical=check_categorical,
                       obj='{obj}.index'.format(obj=obj))

    if check_dtype:
        # We want to skip exact dtype checking when `check_categorical`
        # is False. We'll still raise if only one is a `Categorical`,
        # regardless of `check_categorical`
        if (is_categorical_dtype(left) and is_categorical_dtype(right) and
                not check_categorical):
            pass
        else:
            assert_attr_equal('dtype', left, right)

    # value comparison: dispatch on the requested mode / dtype family
    if check_exact:
        assert_numpy_array_equal(left.get_values(), right.get_values(),
                                 check_dtype=check_dtype,
                                 obj='{obj}'.format(obj=obj),)
    elif check_datetimelike_compat:
        # we want to check only if we have compat dtypes
        # e.g. integer and M|m are NOT compat, but we can simply check
        # the values in that case
        if (is_datetimelike_v_numeric(left, right) or
                is_datetimelike_v_object(left, right) or
                needs_i8_conversion(left) or
                needs_i8_conversion(right)):
            # datetimelike may have different objects (e.g. datetime.datetime
            # vs Timestamp) but will compare equal
            if not Index(left.values).equals(Index(right.values)):
                msg = ('[datetimelike_compat=True] {left} is not equal to '
                       '{right}.').format(left=left.values, right=right.values)
                raise AssertionError(msg)
        else:
            assert_numpy_array_equal(left.get_values(), right.get_values(),
                                     check_dtype=check_dtype)
    elif is_interval_dtype(left) or is_interval_dtype(right):
        assert_interval_array_equal(left.array, right.array)
    elif (is_extension_array_dtype(left.dtype) and
          is_datetime64tz_dtype(left.dtype)):
        # .values is an ndarray, but ._values is the ExtensionArray.
        # TODO: Use .array
        assert is_extension_array_dtype(right.dtype)
        return assert_extension_array_equal(left._values, right._values)
    elif (is_extension_array_dtype(left) and not is_categorical_dtype(left) and
          is_extension_array_dtype(right) and not is_categorical_dtype(right)):
        return assert_extension_array_equal(left.array, right.array)
    else:
        _testing.assert_almost_equal(left.get_values(), right.get_values(),
                                     check_less_precise=check_less_precise,
                                     check_dtype=check_dtype,
                                     obj='{obj}'.format(obj=obj))

    # metadata comparison
    if check_names:
        assert_attr_equal('name', left, right, obj=obj)

    if check_categorical:
        if is_categorical_dtype(left) or is_categorical_dtype(right):
            assert_categorical_equal(left.values, right.values,
                                     obj='{obj} category'.format(obj=obj))
  1101. # This could be refactored to use the NDFrame.equals method
def assert_frame_equal(left, right, check_dtype=True,
                       check_index_type='equiv',
                       check_column_type='equiv',
                       check_frame_type=True,
                       check_less_precise=False,
                       check_names=True,
                       by_blocks=False,
                       check_exact=False,
                       check_datetimelike_compat=False,
                       check_categorical=True,
                       check_like=False,
                       obj='DataFrame'):
    """
    Check that left and right DataFrame are equal.

    This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
    Additional parameters allow varying the strictness of the
    equality checks performed.

    Parameters
    ----------
    left : DataFrame
        First DataFrame to compare.
    right : DataFrame
        Second DataFrame to compare.
    check_dtype : bool, default True
        Whether to check the DataFrame dtype is identical.
    check_index_type : bool / string {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_column_type : bool / string {'equiv'}, default 'equiv'
        Whether to check the columns class, dtype and inferred_type
        are identical. Is passed as the ``exact`` argument of
        :func:`assert_index_equal`.
    check_frame_type : bool, default True
        Whether to check the DataFrame class is identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
    check_names : bool, default True
        Whether to check that the `names` attribute for both the `index`
        and `column` attributes of the DataFrame is identical, i.e.

        * left.index.names == right.index.names
        * left.columns.names == right.columns.names
    by_blocks : bool, default False
        Specify how to compare internal data. If False, compare by columns.
        If True, compare by blocks.
    check_exact : bool, default False
        Whether to compare number exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like which is comparable ignoring dtype.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    check_like : bool, default False
        If True, ignore the order of index & columns.
        Note: index labels must match their respective rows
        (same as in columns) - same labels must be with the same data.
    obj : str, default 'DataFrame'
        Specify object name being compared, internally used to show appropriate
        assertion message.

    See Also
    --------
    assert_series_equal : Equivalent method for asserting Series equality.
    DataFrame.equals : Check DataFrame equality.

    Examples
    --------
    This example shows comparing two DataFrames that are equal
    but with columns of differing dtypes.

    >>> from pandas.util.testing import assert_frame_equal
    >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})

    df1 equals itself.

    >>> assert_frame_equal(df1, df1)

    df1 differs from df2 as column 'b' is of a different type.

    >>> assert_frame_equal(df1, df2)
    Traceback (most recent call last):
    AssertionError: Attributes are different
    Attribute "dtype" are different
    [left]:  int64
    [right]: float64

    Ignore differing dtypes in columns with check_dtype.

    >>> assert_frame_equal(df1, df2, check_dtype=False)
    """
    __tracebackhide__ = True

    # instance validation
    _check_isinstance(left, right, DataFrame)

    if check_frame_type:
        # ToDo: There are some tests using rhs is SparseDataFrame
        # lhs is DataFrame. Should use assert_class_equal in future
        assert isinstance(left, type(right))
        # assert_class_equal(left, right, obj=obj)

    # shape comparison
    if left.shape != right.shape:
        raise_assert_detail(obj,
                            'DataFrame shape mismatch',
                            '{shape!r}'.format(shape=left.shape),
                            '{shape!r}'.format(shape=right.shape))

    if check_like:
        # align left's row/column order to right's before comparing
        left, right = left.reindex_like(right), right

    # index comparison
    assert_index_equal(left.index, right.index, exact=check_index_type,
                       check_names=check_names,
                       check_less_precise=check_less_precise,
                       check_exact=check_exact,
                       check_categorical=check_categorical,
                       obj='{obj}.index'.format(obj=obj))

    # column comparison
    assert_index_equal(left.columns, right.columns, exact=check_column_type,
                       check_names=check_names,
                       check_less_precise=check_less_precise,
                       check_exact=check_exact,
                       check_categorical=check_categorical,
                       obj='{obj}.columns'.format(obj=obj))

    # compare by blocks
    if by_blocks:
        rblocks = right._to_dict_of_blocks()
        lblocks = left._to_dict_of_blocks()
        # both frames must have exactly the same block dtypes
        for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
            assert dtype in lblocks
            assert dtype in rblocks
            assert_frame_equal(lblocks[dtype], rblocks[dtype],
                               check_dtype=check_dtype, obj='DataFrame.blocks')

    # compare by columns
    else:
        for i, col in enumerate(left.columns):
            assert col in right
            lcol = left.iloc[:, i]
            rcol = right.iloc[:, i]
            assert_series_equal(
                lcol, rcol, check_dtype=check_dtype,
                check_index_type=check_index_type,
                check_less_precise=check_less_precise,
                check_exact=check_exact, check_names=check_names,
                check_datetimelike_compat=check_datetimelike_compat,
                check_categorical=check_categorical,
                obj='DataFrame.iloc[:, {idx}]'.format(idx=i))
def assert_panel_equal(left, right,
                       check_dtype=True,
                       check_panel_type=False,
                       check_less_precise=False,
                       check_names=False,
                       by_blocks=False,
                       obj='Panel'):
    """Check that left and right Panels are equal.

    Parameters
    ----------
    left : Panel (or nd)
    right : Panel (or nd)
    check_dtype : bool, default True
        Whether to check the Panel dtype is identical.
    check_panel_type : bool, default False
        Whether to check the Panel class is identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare
    check_names : bool, default False
        Whether to check the Index names attribute.
    by_blocks : bool, default False
        Specify how to compare internal data. If False, compare by columns.
        If True, compare by blocks.
    obj : str, default 'Panel'
        Specify the object name being compared, internally used to show
        the appropriate assertion message.
    """
    if check_panel_type:
        assert_class_equal(left, right, obj=obj)

    # compare the index along every axis of the Panel
    for axis in left._AXIS_ORDERS:
        left_ind = getattr(left, axis)
        right_ind = getattr(right, axis)
        assert_index_equal(left_ind, right_ind, check_names=check_names)

    if by_blocks:
        rblocks = right._to_dict_of_blocks()
        lblocks = left._to_dict_of_blocks()
        # both sides must have exactly the same set of dtype blocks
        for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
            assert dtype in lblocks
            assert dtype in rblocks
            # NOTE(review): the return value of array_equivalent is ignored
            # here, so mismatching block values are not actually asserted
            array_equivalent(lblocks[dtype].values, rblocks[dtype].values)
    else:

        # can potentially be slow
        for i, item in enumerate(left._get_axis(0)):
            msg = "non-matching item (right) '{item}'".format(item=item)
            assert item in right, msg
            litem = left.iloc[i]
            ritem = right.iloc[i]
            assert_frame_equal(litem, ritem,
                               check_less_precise=check_less_precise,
                               check_names=check_names)

        # ensure right has no items missing from left
        for i, item in enumerate(right._get_axis(0)):
            msg = "non-matching item (left) '{item}'".format(item=item)
            assert item in left, msg
  1293. def assert_equal(left, right, **kwargs):
  1294. """
  1295. Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
  1296. Parameters
  1297. ----------
  1298. left : Index, Series, DataFrame, ExtensionArray, or np.ndarray
  1299. right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
  1300. **kwargs
  1301. """
  1302. __tracebackhide__ = True
  1303. if isinstance(left, pd.Index):
  1304. assert_index_equal(left, right, **kwargs)
  1305. elif isinstance(left, pd.Series):
  1306. assert_series_equal(left, right, **kwargs)
  1307. elif isinstance(left, pd.DataFrame):
  1308. assert_frame_equal(left, right, **kwargs)
  1309. elif isinstance(left, IntervalArray):
  1310. assert_interval_array_equal(left, right, **kwargs)
  1311. elif isinstance(left, PeriodArray):
  1312. assert_period_array_equal(left, right, **kwargs)
  1313. elif isinstance(left, DatetimeArray):
  1314. assert_datetime_array_equal(left, right, **kwargs)
  1315. elif isinstance(left, TimedeltaArray):
  1316. assert_timedelta_array_equal(left, right, **kwargs)
  1317. elif isinstance(left, ExtensionArray):
  1318. assert_extension_array_equal(left, right, **kwargs)
  1319. elif isinstance(left, np.ndarray):
  1320. assert_numpy_array_equal(left, right, **kwargs)
  1321. else:
  1322. raise NotImplementedError(type(left))
def box_expected(expected, box_cls, transpose=True):
    """
    Helper function to wrap the expected output of a test in a given box_class.

    Parameters
    ----------
    expected : np.ndarray, Index, Series
    box_cls : {Index, Series, DataFrame}
    transpose : bool, default True
        Only used for the DataFrame case: transpose the single-column
        frame into a single-row frame.

    Returns
    -------
    subclass of box_cls
    """
    if box_cls is pd.Index:
        expected = pd.Index(expected)
    elif box_cls is pd.Series:
        expected = pd.Series(expected)
    elif box_cls is pd.DataFrame:
        expected = pd.Series(expected).to_frame()
        if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
            # not a single-column, in order to operate against non-DataFrame
            # vectors of the same length.
            expected = expected.T
    elif box_cls is PeriodArray:
        # the PeriodArray constructor is not as flexible as period_array
        expected = period_array(expected)
    elif box_cls is DatetimeArray:
        expected = DatetimeArray(expected)
    elif box_cls is TimedeltaArray:
        expected = TimedeltaArray(expected)
    elif box_cls is np.ndarray:
        expected = np.array(expected)
    elif box_cls is to_array:
        # the to_array helper below dispatches on dtype
        expected = to_array(expected)
    else:
        raise NotImplementedError(box_cls)
    return expected
  1359. def to_array(obj):
  1360. # temporary implementation until we get pd.array in place
  1361. if is_period_dtype(obj):
  1362. return period_array(obj)
  1363. elif is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj):
  1364. return DatetimeArray._from_sequence(obj)
  1365. elif is_timedelta64_dtype(obj):
  1366. return TimedeltaArray._from_sequence(obj)
  1367. else:
  1368. return np.array(obj)
  1369. # -----------------------------------------------------------------------------
  1370. # Sparse
def assert_sp_array_equal(left, right, check_dtype=True, check_kind=True,
                          check_fill_value=True,
                          consolidate_block_indices=False):
    """Check that the left and right SparseArray are equal.

    Parameters
    ----------
    left : SparseArray
    right : SparseArray
    check_dtype : bool, default True
        Whether to check the data dtype is identical.
    check_kind : bool, default True
        Whether to compare the kind of the sparse index for each column.
    check_fill_value : bool, default True
        Whether to check that left.fill_value matches right.fill_value
    consolidate_block_indices : bool, default False
        Whether to consolidate contiguous blocks for sparse arrays with
        a BlockIndex. Some operations, e.g. concat, will end up with
        block indices that could be consolidated. Setting this to true will
        create a new BlockIndex for that array, with consolidated
        block indices.
    """
    _check_isinstance(left, right, pd.SparseArray)

    # compare only the stored (non-fill) values first
    assert_numpy_array_equal(left.sp_values, right.sp_values,
                             check_dtype=check_dtype)

    # SparseIndex comparison
    assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
    assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)

    if not check_kind:
        # normalize both sides to BlockIndex so kind differences are ignored
        left_index = left.sp_index.to_block_index()
        right_index = right.sp_index.to_block_index()
    else:
        left_index = left.sp_index
        right_index = right.sp_index

    if consolidate_block_indices and left.kind == 'block':
        # we'll probably remove this hack...
        left_index = left_index.to_int_index().to_block_index()
        right_index = right_index.to_int_index().to_block_index()

    if not left_index.equals(right_index):
        raise_assert_detail('SparseArray.index', 'index are not equal',
                            left_index, right_index)
    else:
        # Just ensure a
        pass

    if check_fill_value:
        assert_attr_equal('fill_value', left, right)
    if check_dtype:
        assert_attr_equal('dtype', left, right)
    # also compare the dense representations
    assert_numpy_array_equal(left.values, right.values,
                             check_dtype=check_dtype)
def assert_sp_series_equal(left, right, check_dtype=True, exact_indices=True,
                           check_series_type=True, check_names=True,
                           check_kind=True,
                           check_fill_value=True,
                           consolidate_block_indices=False,
                           obj='SparseSeries'):
    """Check that the left and right SparseSeries are equal.

    Parameters
    ----------
    left : SparseSeries
    right : SparseSeries
    check_dtype : bool, default True
        Whether to check the Series dtype is identical.
    exact_indices : bool, default True
        NOTE(review): accepted but never used in this function's body.
    check_series_type : bool, default True
        Whether to check the SparseSeries class is identical.
    check_names : bool, default True
        Whether to check the SparseSeries name attribute.
    check_kind : bool, default True
        Whether to compare the kind of the sparse index for each column.
    check_fill_value : bool, default True
        Whether to check that left.fill_value matches right.fill_value
    consolidate_block_indices : bool, default False
        Whether to consolidate contiguous blocks for sparse arrays with
        a BlockIndex. Some operations, e.g. concat, will end up with
        block indices that could be consolidated. Setting this to true will
        create a new BlockIndex for that array, with consolidated
        block indices.
    obj : str, default 'SparseSeries'
        Specify the object name being compared, internally used to show
        the appropriate assertion message.
    """
    _check_isinstance(left, right, pd.SparseSeries)

    if check_series_type:
        assert_class_equal(left, right, obj=obj)

    assert_index_equal(left.index, right.index,
                       obj='{obj}.index'.format(obj=obj))

    # delegate the sparse-values comparison to assert_sp_array_equal
    assert_sp_array_equal(left.values, right.values,
                          check_kind=check_kind,
                          check_fill_value=check_fill_value,
                          consolidate_block_indices=consolidate_block_indices)

    if check_names:
        assert_attr_equal('name', left, right)
    if check_dtype:
        assert_attr_equal('dtype', left, right)
    # compare densified values as a final check
    assert_numpy_array_equal(np.asarray(left.values),
                             np.asarray(right.values))
def assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True,
                          check_frame_type=True, check_kind=True,
                          check_fill_value=True,
                          consolidate_block_indices=False,
                          obj='SparseDataFrame'):
    """Check that the left and right SparseDataFrame are equal.

    Parameters
    ----------
    left : SparseDataFrame
    right : SparseDataFrame
    check_dtype : bool, default True
        Whether to check the Series dtype is identical.
    exact_indices : bool, default True
        SparseSeries SparseIndex objects must be exactly the same,
        otherwise just compare dense representations.
    check_frame_type : bool, default True
        Whether to check the SparseDataFrame class is identical.
    check_kind : bool, default True
        Whether to compare the kind of the sparse index for each column.
    check_fill_value : bool, default True
        Whether to check that left.fill_value matches right.fill_value
    consolidate_block_indices : bool, default False
        Whether to consolidate contiguous blocks for sparse arrays with
        a BlockIndex. Some operations, e.g. concat, will end up with
        block indices that could be consolidated. Setting this to true will
        create a new BlockIndex for that array, with consolidated
        block indices.
    obj : str, default 'SparseDataFrame'
        Specify the object name being compared, internally used to show
        the appropriate assertion message.
    """
    _check_isinstance(left, right, pd.SparseDataFrame)

    if check_frame_type:
        assert_class_equal(left, right, obj=obj)

    assert_index_equal(left.index, right.index,
                       obj='{obj}.index'.format(obj=obj))
    assert_index_equal(left.columns, right.columns,
                       obj='{obj}.columns'.format(obj=obj))

    if check_fill_value:
        assert_attr_equal('default_fill_value', left, right, obj=obj)

    # compare column by column; right must contain every left column
    for col, series in compat.iteritems(left):
        assert (col in right)
        # trade-off?
        if exact_indices:
            assert_sp_series_equal(
                series, right[col],
                check_dtype=check_dtype,
                check_kind=check_kind,
                check_fill_value=check_fill_value,
                consolidate_block_indices=consolidate_block_indices
            )
        else:
            # fall back to comparing the densified series
            assert_series_equal(series.to_dense(), right[col].to_dense(),
                                check_dtype=check_dtype)

    # do I care?
    # assert(left.default_kind == right.default_kind)

    # and left must contain every right column
    for col in right:
        assert (col in left)
  1525. # -----------------------------------------------------------------------------
  1526. # Others
  1527. def assert_contains_all(iterable, dic):
  1528. for k in iterable:
  1529. assert k in dic, "Did not contain item: '{key!r}'".format(key=k)
  1530. def assert_copy(iter1, iter2, **eql_kwargs):
  1531. """
  1532. iter1, iter2: iterables that produce elements
  1533. comparable with assert_almost_equal
  1534. Checks that the elements are equal, but not
  1535. the same object. (Does not check that items
  1536. in sequences are also not the same object)
  1537. """
  1538. for elem1, elem2 in zip(iter1, iter2):
  1539. assert_almost_equal(elem1, elem2, **eql_kwargs)
  1540. msg = ("Expected object {obj1!r} and object {obj2!r} to be "
  1541. "different objects, but they were the same object."
  1542. ).format(obj1=type(elem1), obj2=type(elem2))
  1543. assert elem1 is not elem2, msg
  1544. def getCols(k):
  1545. return string.ascii_uppercase[:k]
  1546. # make index
  1547. def makeStringIndex(k=10, name=None):
  1548. return Index(rands_array(nchars=10, size=k), name=name)
  1549. def makeUnicodeIndex(k=10, name=None):
  1550. return Index(randu_array(nchars=10, size=k), name=name)
  1551. def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
  1552. """ make a length k index or n categories """
  1553. x = rands_array(nchars=4, size=n)
  1554. return CategoricalIndex(np.random.choice(x, k), name=name, **kwargs)
  1555. def makeIntervalIndex(k=10, name=None, **kwargs):
  1556. """ make a length k IntervalIndex """
  1557. x = np.linspace(0, 100, num=(k + 1))
  1558. return IntervalIndex.from_breaks(x, name=name, **kwargs)
  1559. def makeBoolIndex(k=10, name=None):
  1560. if k == 1:
  1561. return Index([True], name=name)
  1562. elif k == 2:
  1563. return Index([False, True], name=name)
  1564. return Index([False, True] + [False] * (k - 2), name=name)
  1565. def makeIntIndex(k=10, name=None):
  1566. return Index(lrange(k), name=name)
  1567. def makeUIntIndex(k=10, name=None):
  1568. return Index([2**63 + i for i in lrange(k)], name=name)
  1569. def makeRangeIndex(k=10, name=None, **kwargs):
  1570. return RangeIndex(0, k, 1, name=name, **kwargs)
  1571. def makeFloatIndex(k=10, name=None):
  1572. values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
  1573. return Index(values * (10 ** np.random.randint(0, 9)), name=name)
  1574. def makeDateIndex(k=10, freq='B', name=None, **kwargs):
  1575. dt = datetime(2000, 1, 1)
  1576. dr = bdate_range(dt, periods=k, freq=freq, name=name)
  1577. return DatetimeIndex(dr, name=name, **kwargs)
  1578. def makeTimedeltaIndex(k=10, freq='D', name=None, **kwargs):
  1579. return pd.timedelta_range(start='1 day', periods=k, freq=freq,
  1580. name=name, **kwargs)
  1581. def makePeriodIndex(k=10, name=None, **kwargs):
  1582. dt = datetime(2000, 1, 1)
  1583. dr = pd.period_range(start=dt, periods=k, freq='B', name=name, **kwargs)
  1584. return dr
  1585. def makeMultiIndex(k=10, names=None, **kwargs):
  1586. return MultiIndex.from_product(
  1587. (('foo', 'bar'), (1, 2)), names=names, **kwargs)
  1588. def all_index_generator(k=10):
  1589. """Generator which can be iterated over to get instances of all the various
  1590. index classes.
  1591. Parameters
  1592. ----------
  1593. k: length of each of the index instances
  1594. """
  1595. all_make_index_funcs = [makeIntIndex, makeFloatIndex, makeStringIndex,
  1596. makeUnicodeIndex, makeDateIndex, makePeriodIndex,
  1597. makeTimedeltaIndex, makeBoolIndex, makeRangeIndex,
  1598. makeIntervalIndex,
  1599. makeCategoricalIndex]
  1600. for make_index_func in all_make_index_funcs:
  1601. yield make_index_func(k=k)
  1602. def index_subclass_makers_generator():
  1603. make_index_funcs = [
  1604. makeDateIndex, makePeriodIndex,
  1605. makeTimedeltaIndex, makeRangeIndex,
  1606. makeIntervalIndex, makeCategoricalIndex,
  1607. makeMultiIndex
  1608. ]
  1609. for make_index_func in make_index_funcs:
  1610. yield make_index_func
  1611. def all_timeseries_index_generator(k=10):
  1612. """Generator which can be iterated over to get instances of all the classes
  1613. which represent time-seires.
  1614. Parameters
  1615. ----------
  1616. k: length of each of the index instances
  1617. """
  1618. make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
  1619. for make_index_func in make_index_funcs:
  1620. yield make_index_func(k=k)
  1621. # make series
  1622. def makeFloatSeries(name=None):
  1623. index = makeStringIndex(N)
  1624. return Series(randn(N), index=index, name=name)
  1625. def makeStringSeries(name=None):
  1626. index = makeStringIndex(N)
  1627. return Series(randn(N), index=index, name=name)
  1628. def makeObjectSeries(name=None):
  1629. dateIndex = makeDateIndex(N)
  1630. dateIndex = Index(dateIndex, dtype=object)
  1631. index = makeStringIndex(N)
  1632. return Series(dateIndex, index=index, name=name)
  1633. def getSeriesData():
  1634. index = makeStringIndex(N)
  1635. return {c: Series(randn(N), index=index) for c in getCols(K)}
  1636. def makeTimeSeries(nper=None, freq='B', name=None):
  1637. if nper is None:
  1638. nper = N
  1639. return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
  1640. def makePeriodSeries(nper=None, name=None):
  1641. if nper is None:
  1642. nper = N
  1643. return Series(randn(nper), index=makePeriodIndex(nper), name=name)
  1644. def getTimeSeriesData(nper=None, freq='B'):
  1645. return {c: makeTimeSeries(nper, freq) for c in getCols(K)}
  1646. def getPeriodData(nper=None):
  1647. return {c: makePeriodSeries(nper) for c in getCols(K)}
  1648. # make frame
  1649. def makeTimeDataFrame(nper=None, freq='B'):
  1650. data = getTimeSeriesData(nper, freq)
  1651. return DataFrame(data)
  1652. def makeDataFrame():
  1653. data = getSeriesData()
  1654. return DataFrame(data)
  1655. def getMixedTypeDict():
  1656. index = Index(['a', 'b', 'c', 'd', 'e'])
  1657. data = {
  1658. 'A': [0., 1., 2., 3., 4.],
  1659. 'B': [0., 1., 0., 1., 0.],
  1660. 'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
  1661. 'D': bdate_range('1/1/2009', periods=5)
  1662. }
  1663. return index, data
  1664. def makeMixedDataFrame():
  1665. return DataFrame(getMixedTypeDict()[1])
  1666. def makePeriodFrame(nper=None):
  1667. data = getPeriodData(nper)
  1668. return DataFrame(data)
  1669. def makePanel(nper=None):
  1670. with warnings.catch_warnings(record=True):
  1671. warnings.filterwarnings("ignore", "\\nPanel", FutureWarning)
  1672. cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
  1673. data = {c: makeTimeDataFrame(nper) for c in cols}
  1674. return Panel.fromDict(data)
  1675. def makePeriodPanel(nper=None):
  1676. with warnings.catch_warnings(record=True):
  1677. warnings.filterwarnings("ignore", "\\nPanel", FutureWarning)
  1678. cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
  1679. data = {c: makePeriodFrame(nper) for c in cols}
  1680. return Panel.fromDict(data)
  1681. def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
  1682. idx_type=None):
  1683. """Create an index/multindex with given dimensions, levels, names, etc'
  1684. nentries - number of entries in index
  1685. nlevels - number of levels (> 1 produces multindex)
  1686. prefix - a string prefix for labels
  1687. names - (Optional), bool or list of strings. if True will use default
  1688. names, if false will use no names, if a list is given, the name of
  1689. each level in the index will be taken from the list.
  1690. ndupe_l - (Optional), list of ints, the number of rows for which the
  1691. label will repeated at the corresponding level, you can specify just
  1692. the first few, the rest will use the default ndupe_l of 1.
  1693. len(ndupe_l) <= nlevels.
  1694. idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
  1695. If idx_type is not None, `idx_nlevels` must be 1.
  1696. "i"/"f" creates an integer/float index,
  1697. "s"/"u" creates a string/unicode index
  1698. "dt" create a datetime index.
  1699. "td" create a datetime index.
  1700. if unspecified, string labels will be generated.
  1701. """
  1702. if ndupe_l is None:
  1703. ndupe_l = [1] * nlevels
  1704. assert (is_sequence(ndupe_l) and len(ndupe_l) <= nlevels)
  1705. assert (names is None or names is False or
  1706. names is True or len(names) is nlevels)
  1707. assert idx_type is None or (idx_type in ('i', 'f', 's', 'u',
  1708. 'dt', 'p', 'td')
  1709. and nlevels == 1)
  1710. if names is True:
  1711. # build default names
  1712. names = [prefix + str(i) for i in range(nlevels)]
  1713. if names is False:
  1714. # pass None to index constructor for no name
  1715. names = None
  1716. # make singelton case uniform
  1717. if isinstance(names, compat.string_types) and nlevels == 1:
  1718. names = [names]
  1719. # specific 1D index type requested?
  1720. idx_func = dict(i=makeIntIndex, f=makeFloatIndex,
  1721. s=makeStringIndex, u=makeUnicodeIndex,
  1722. dt=makeDateIndex, td=makeTimedeltaIndex,
  1723. p=makePeriodIndex).get(idx_type)
  1724. if idx_func:
  1725. idx = idx_func(nentries)
  1726. # but we need to fill in the name
  1727. if names:
  1728. idx.name = names[0]
  1729. return idx
  1730. elif idx_type is not None:
  1731. raise ValueError('"{idx_type}" is not a legal value for `idx_type`, '
  1732. 'use "i"/"f"/"s"/"u"/"dt/"p"/"td".'
  1733. .format(idx_type=idx_type))
  1734. if len(ndupe_l) < nlevels:
  1735. ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
  1736. assert len(ndupe_l) == nlevels
  1737. assert all(x > 0 for x in ndupe_l)
  1738. tuples = []
  1739. for i in range(nlevels):
  1740. def keyfunc(x):
  1741. import re
  1742. numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
  1743. return lmap(int, numeric_tuple)
  1744. # build a list of lists to create the index from
  1745. div_factor = nentries // ndupe_l[i] + 1
  1746. cnt = Counter()
  1747. for j in range(div_factor):
  1748. label = '{prefix}_l{i}_g{j}'.format(prefix=prefix, i=i, j=j)
  1749. cnt[label] = ndupe_l[i]
  1750. # cute Counter trick
  1751. result = list(sorted(cnt.elements(), key=keyfunc))[:nentries]
  1752. tuples.append(result)
  1753. tuples = lzip(*tuples)
  1754. # convert tuples to index
  1755. if nentries == 1:
  1756. # we have a single level of tuples, i.e. a regular Index
  1757. index = Index(tuples[0], name=names[0])
  1758. elif nlevels == 1:
  1759. name = None if names is None else names[0]
  1760. index = Index((x[0] for x in tuples), name=name)
  1761. else:
  1762. index = MultiIndex.from_tuples(tuples, names=names)
  1763. return index
def makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True,
                        c_idx_nlevels=1, r_idx_nlevels=1, data_gen_f=None,
                        c_ndupe_l=None, r_ndupe_l=None, dtype=None,
                        c_idx_type=None, r_idx_type=None):
    """
    nrows, ncols - number of data rows/cols
    c_idx_names, idx_names - False/True/list of strings, yields No names ,
        default names or uses the provided names for the levels of the
        corresponding index. You can provide a single string when
        c_idx_nlevels ==1.
    c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
    r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
    data_gen_f - a function f(row,col) which return the data value
        at that position, the default generator used yields values of the form
        "RxCy" based on position.
    c_ndupe_l, r_ndupe_l - list of integers, determines the number
        of duplicates for each label at a given level of the corresponding
        index. The default `None` value produces a multiplicity of 1 across
        all levels, i.e. a unique index. Will accept a partial list of length
        N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
        nrows/ncol, the last label might have lower multiplicity.
    dtype - passed to the DataFrame constructor as is, in case you wish to
        have more control in conjunction with a custom `data_gen_f`
    r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
        If idx_type is not None, `idx_nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index
        "dt" create a datetime index.
        "td" create a timedelta index.
        if unspecified, string labels will be generated.

    Examples:

    # 5 row, 3 columns, default names on both, single index on both axis
    >> makeCustomDataframe(5,3)

    # make the data a random int between 1 and 100
    >> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))

    # 2-level multiindex on rows with each label duplicated
    # twice on first level, default names on both axis, single
    # index on both axis
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])

    # DatetimeIndex on row, index with unicode labels on columns
    # no names on either axis
    >> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
                             r_idx_type="dt",c_idx_type="u")

    # 4-level multindex on rows with names provided, 2-level multindex
    # on columns with default labels and default names.
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
                             r_idx_names=["FEE","FI","FO","FAM"],
                             c_idx_nlevels=2)

    >> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
    """

    assert c_idx_nlevels > 0
    assert r_idx_nlevels > 0
    assert r_idx_type is None or (r_idx_type in ('i', 'f', 's',
                                                 'u', 'dt', 'p', 'td')
                                  and r_idx_nlevels == 1)
    assert c_idx_type is None or (c_idx_type in ('i', 'f', 's',
                                                 'u', 'dt', 'p', 'td')
                                  and c_idx_nlevels == 1)

    # build the row and column indexes via makeCustomIndex
    columns = makeCustomIndex(ncols, nlevels=c_idx_nlevels, prefix='C',
                              names=c_idx_names, ndupe_l=c_ndupe_l,
                              idx_type=c_idx_type)
    index = makeCustomIndex(nrows, nlevels=r_idx_nlevels, prefix='R',
                            names=r_idx_names, ndupe_l=r_ndupe_l,
                            idx_type=r_idx_type)

    # by default, generate data based on location
    if data_gen_f is None:
        data_gen_f = lambda r, c: "R{rows}C{cols}".format(rows=r, cols=c)

    data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]

    return DataFrame(data, index, columns, dtype=dtype)
  1833. def _create_missing_idx(nrows, ncols, density, random_state=None):
  1834. if random_state is None:
  1835. random_state = np.random
  1836. else:
  1837. random_state = np.random.RandomState(random_state)
  1838. # below is cribbed from scipy.sparse
  1839. size = int(np.round((1 - density) * nrows * ncols))
  1840. # generate a few more to ensure unique values
  1841. min_rows = 5
  1842. fac = 1.02
  1843. extra_size = min(size + min_rows, fac * size)
  1844. def _gen_unique_rand(rng, _extra_size):
  1845. ind = rng.rand(int(_extra_size))
  1846. return np.unique(np.floor(ind * nrows * ncols))[:size]
  1847. ind = _gen_unique_rand(random_state, extra_size)
  1848. while ind.size < size:
  1849. extra_size *= 1.05
  1850. ind = _gen_unique_rand(random_state, extra_size)
  1851. j = np.floor(ind * 1. / nrows).astype(int)
  1852. i = (ind - j * nrows).astype(int)
  1853. return i.tolist(), j.tolist()
  1854. def makeMissingCustomDataframe(nrows, ncols, density=.9, random_state=None,
  1855. c_idx_names=True, r_idx_names=True,
  1856. c_idx_nlevels=1, r_idx_nlevels=1,
  1857. data_gen_f=None,
  1858. c_ndupe_l=None, r_ndupe_l=None, dtype=None,
  1859. c_idx_type=None, r_idx_type=None):
  1860. """
  1861. Parameters
  1862. ----------
  1863. Density : float, optional
  1864. Float in (0, 1) that gives the percentage of non-missing numbers in
  1865. the DataFrame.
  1866. random_state : {np.random.RandomState, int}, optional
  1867. Random number generator or random seed.
  1868. See makeCustomDataframe for descriptions of the rest of the parameters.
  1869. """
  1870. df = makeCustomDataframe(nrows, ncols, c_idx_names=c_idx_names,
  1871. r_idx_names=r_idx_names,
  1872. c_idx_nlevels=c_idx_nlevels,
  1873. r_idx_nlevels=r_idx_nlevels,
  1874. data_gen_f=data_gen_f,
  1875. c_ndupe_l=c_ndupe_l, r_ndupe_l=r_ndupe_l,
  1876. dtype=dtype, c_idx_type=c_idx_type,
  1877. r_idx_type=r_idx_type)
  1878. i, j = _create_missing_idx(nrows, ncols, density, random_state)
  1879. df.values[i, j] = np.nan
  1880. return df
  1881. def makeMissingDataframe(density=.9, random_state=None):
  1882. df = makeDataFrame()
  1883. i, j = _create_missing_idx(*df.shape, density=density,
  1884. random_state=random_state)
  1885. df.values[i, j] = np.nan
  1886. return df
def add_nans(panel):
    """Set a leading block of each column to NaN, in place, and return panel.

    For item ``i`` and column ``j``, the first ``i + j`` rows are set to
    NaN, so later items/columns carry progressively more missing values.
    """
    I, J, N = panel.shape
    for i, item in enumerate(panel.items):
        dm = panel[item]
        for j, col in enumerate(dm.columns):
            # NOTE(review): chained indexing — mutates the underlying
            # data of the panel's item frame in place
            dm[col][:i + j] = np.NaN
    return panel
  1894. class TestSubDict(dict):
  1895. def __init__(self, *args, **kwargs):
  1896. dict.__init__(self, *args, **kwargs)
  1897. def optional_args(decorator):
  1898. """allows a decorator to take optional positional and keyword arguments.
  1899. Assumes that taking a single, callable, positional argument means that
  1900. it is decorating a function, i.e. something like this::
  1901. @my_decorator
  1902. def function(): pass
  1903. Calls decorator with decorator(f, *args, **kwargs)"""
  1904. @wraps(decorator)
  1905. def wrapper(*args, **kwargs):
  1906. def dec(f):
  1907. return decorator(f, *args, **kwargs)
  1908. is_decorating = not kwargs and len(args) == 1 and callable(args[0])
  1909. if is_decorating:
  1910. f = args[0]
  1911. args = []
  1912. return dec(f)
  1913. else:
  1914. return dec
  1915. return wrapper
# skip tests on exceptions with this message
_network_error_messages = (
    # 'urlopen error timed out',
    # 'timeout: timed out',
    # 'socket.timeout: timed out',
    'timed out',
    'Server Hangup',
    'HTTP Error 503: Service Unavailable',
    '502: Proxy Error',
    'HTTP Error 502: internal error',
    'HTTP Error 502',
    'HTTP Error 503',
    'HTTP Error 403',
    'HTTP Error 400',
    'Temporary failure in name resolution',
    'Name or service not known',
    'Connection refused',
    'certificate verify',
)

# or this e.errno/e.reason.errno
_network_errno_vals = (
    101,  # Network is unreachable
    111,  # Connection refused
    110,  # Connection timed out
    104,  # Connection reset Error
    54,  # Connection reset by peer
    60,  # urllib.error.URLError: [Errno 60] Connection timed out
)

# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.

# and conditionally raise on these exception types
_network_error_classes = (IOError, httplib.HTTPException)

if PY3:
    # TimeoutError only exists on Python 3, so append it there only
    _network_error_classes += (TimeoutError,)  # noqa
  1952. def can_connect(url, error_classes=_network_error_classes):
  1953. """Try to connect to the given url. True if succeeds, False if IOError
  1954. raised
  1955. Parameters
  1956. ----------
  1957. url : basestring
  1958. The URL to try to connect to
  1959. Returns
  1960. -------
  1961. connectable : bool
  1962. Return True if no IOError (unable to connect) or URLError (bad url) was
  1963. raised
  1964. """
  1965. try:
  1966. with urlopen(url):
  1967. pass
  1968. except error_classes:
  1969. return False
  1970. else:
  1971. return True
  1972. @optional_args
  1973. def network(t, url="http://www.google.com",
  1974. raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
  1975. check_before_test=False,
  1976. error_classes=_network_error_classes,
  1977. skip_errnos=_network_errno_vals,
  1978. _skip_on_messages=_network_error_messages,
  1979. ):
  1980. """
  1981. Label a test as requiring network connection and, if an error is
  1982. encountered, only raise if it does not find a network connection.
  1983. In comparison to ``network``, this assumes an added contract to your test:
  1984. you must assert that, under normal conditions, your test will ONLY fail if
  1985. it does not have network connectivity.
  1986. You can call this in 3 ways: as a standard decorator, with keyword
  1987. arguments, or with a positional argument that is the url to check.
  1988. Parameters
  1989. ----------
  1990. t : callable
  1991. The test requiring network connectivity.
  1992. url : path
  1993. The url to test via ``pandas.io.common.urlopen`` to check
  1994. for connectivity. Defaults to 'http://www.google.com'.
  1995. raise_on_error : bool
  1996. If True, never catches errors.
  1997. check_before_test : bool
  1998. If True, checks connectivity before running the test case.
  1999. error_classes : tuple or Exception
  2000. error classes to ignore. If not in ``error_classes``, raises the error.
  2001. defaults to IOError. Be careful about changing the error classes here.
  2002. skip_errnos : iterable of int
  2003. Any exception that has .errno or .reason.erno set to one
  2004. of these values will be skipped with an appropriate
  2005. message.
  2006. _skip_on_messages: iterable of string
  2007. any exception e for which one of the strings is
  2008. a substring of str(e) will be skipped with an appropriate
  2009. message. Intended to suppress errors where an errno isn't available.
  2010. Notes
  2011. -----
  2012. * ``raise_on_error`` supercedes ``check_before_test``
  2013. Returns
  2014. -------
  2015. t : callable
  2016. The decorated test ``t``, with checks for connectivity errors.
  2017. Example
  2018. -------
  2019. Tests decorated with @network will fail if it's possible to make a network
  2020. connection to another URL (defaults to google.com)::
  2021. >>> from pandas.util.testing import network
  2022. >>> from pandas.io.common import urlopen
  2023. >>> @network
  2024. ... def test_network():
  2025. ... with urlopen("rabbit://bonanza.com"):
  2026. ... pass
  2027. Traceback
  2028. ...
  2029. URLError: <urlopen error unknown url type: rabit>
  2030. You can specify alternative URLs::
  2031. >>> @network("http://www.yahoo.com")
  2032. ... def test_something_with_yahoo():
  2033. ... raise IOError("Failure Message")
  2034. >>> test_something_with_yahoo()
  2035. Traceback (most recent call last):
  2036. ...
  2037. IOError: Failure Message
  2038. If you set check_before_test, it will check the url first and not run the
  2039. test on failure::
  2040. >>> @network("failing://url.blaher", check_before_test=True)
  2041. ... def test_something():
  2042. ... print("I ran!")
  2043. ... raise ValueError("Failure")
  2044. >>> test_something()
  2045. Traceback (most recent call last):
  2046. ...
  2047. Errors not related to networking will always be raised.
  2048. """
  2049. from pytest import skip
  2050. t.network = True
  2051. @compat.wraps(t)
  2052. def wrapper(*args, **kwargs):
  2053. if check_before_test and not raise_on_error:
  2054. if not can_connect(url, error_classes):
  2055. skip()
  2056. try:
  2057. return t(*args, **kwargs)
  2058. except Exception as e:
  2059. errno = getattr(e, 'errno', None)
  2060. if not errno and hasattr(errno, "reason"):
  2061. errno = getattr(e.reason, 'errno', None)
  2062. if errno in skip_errnos:
  2063. skip("Skipping test due to known errno"
  2064. " and error {error}".format(error=e))
  2065. try:
  2066. e_str = traceback.format_exc(e)
  2067. except Exception:
  2068. e_str = str(e)
  2069. if any(m.lower() in e_str.lower() for m in _skip_on_messages):
  2070. skip("Skipping test because exception "
  2071. "message is known and error {error}".format(error=e))
  2072. if not isinstance(e, error_classes):
  2073. raise
  2074. if raise_on_error or can_connect(url, error_classes):
  2075. raise
  2076. else:
  2077. skip("Skipping test due to lack of connectivity"
  2078. " and error {error}".format(error=e))
  2079. return wrapper
# Backwards-compatible alias: ``with_connectivity_check`` is exactly the
# ``network`` decorator under its older name.
with_connectivity_check = network
  2081. def assert_raises_regex(_exception, _regexp, _callable=None,
  2082. *args, **kwargs):
  2083. r"""
  2084. Check that the specified Exception is raised and that the error message
  2085. matches a given regular expression pattern. This may be a regular
  2086. expression object or a string containing a regular expression suitable
  2087. for use by `re.search()`. This is a port of the `assertRaisesRegexp`
  2088. function from unittest in Python 2.7.
  2089. .. deprecated:: 0.24.0
  2090. Use `pytest.raises` instead.
  2091. Examples
  2092. --------
  2093. >>> assert_raises_regex(ValueError, 'invalid literal for.*XYZ', int, 'XYZ')
  2094. >>> import re
  2095. >>> assert_raises_regex(ValueError, re.compile('literal'), int, 'XYZ')
  2096. If an exception of a different type is raised, it bubbles up.
  2097. >>> assert_raises_regex(TypeError, 'literal', int, 'XYZ')
  2098. Traceback (most recent call last):
  2099. ...
  2100. ValueError: invalid literal for int() with base 10: 'XYZ'
  2101. >>> dct = dict()
  2102. >>> assert_raises_regex(KeyError, 'pear', dct.__getitem__, 'apple')
  2103. Traceback (most recent call last):
  2104. ...
  2105. AssertionError: "pear" does not match "'apple'"
  2106. You can also use this in a with statement.
  2107. >>> with assert_raises_regex(TypeError, r'unsupported operand type\(s\)'):
  2108. ... 1 + {}
  2109. >>> with assert_raises_regex(TypeError, 'banana'):
  2110. ... 'apple'[0] = 'b'
  2111. Traceback (most recent call last):
  2112. ...
  2113. AssertionError: "banana" does not match "'str' object does not support \
  2114. item assignment"
  2115. """
  2116. warnings.warn(("assert_raises_regex has been deprecated and will "
  2117. "be removed in the next release. Please use "
  2118. "`pytest.raises` instead."), FutureWarning, stacklevel=2)
  2119. manager = _AssertRaisesContextmanager(exception=_exception, regexp=_regexp)
  2120. if _callable is not None:
  2121. with manager:
  2122. _callable(*args, **kwargs)
  2123. else:
  2124. return manager
  2125. class _AssertRaisesContextmanager(object):
  2126. """
  2127. Context manager behind `assert_raises_regex`.
  2128. """
  2129. def __init__(self, exception, regexp=None):
  2130. """
  2131. Initialize an _AssertRaisesContextManager instance.
  2132. Parameters
  2133. ----------
  2134. exception : class
  2135. The expected Exception class.
  2136. regexp : str, default None
  2137. The regex to compare against the Exception message.
  2138. """
  2139. self.exception = exception
  2140. if regexp is not None and not hasattr(regexp, "search"):
  2141. regexp = re.compile(regexp, re.DOTALL)
  2142. self.regexp = regexp
  2143. def __enter__(self):
  2144. return self
  2145. def __exit__(self, exc_type, exc_value, trace_back):
  2146. expected = self.exception
  2147. if not exc_type:
  2148. exp_name = getattr(expected, "__name__", str(expected))
  2149. raise AssertionError("{name} not raised.".format(name=exp_name))
  2150. return self.exception_matches(exc_type, exc_value, trace_back)
  2151. def exception_matches(self, exc_type, exc_value, trace_back):
  2152. """
  2153. Check that the Exception raised matches the expected Exception
  2154. and expected error message regular expression.
  2155. Parameters
  2156. ----------
  2157. exc_type : class
  2158. The type of Exception raised.
  2159. exc_value : Exception
  2160. The instance of `exc_type` raised.
  2161. trace_back : stack trace object
  2162. The traceback object associated with `exc_value`.
  2163. Returns
  2164. -------
  2165. is_matched : bool
  2166. Whether or not the Exception raised matches the expected
  2167. Exception class and expected error message regular expression.
  2168. Raises
  2169. ------
  2170. AssertionError : The error message provided does not match
  2171. the expected error message regular expression.
  2172. """
  2173. if issubclass(exc_type, self.exception):
  2174. if self.regexp is not None:
  2175. val = str(exc_value)
  2176. if not self.regexp.search(val):
  2177. msg = '"{pat}" does not match "{val}"'.format(
  2178. pat=self.regexp.pattern, val=val)
  2179. e = AssertionError(msg)
  2180. raise_with_traceback(e, trace_back)
  2181. return True
  2182. else:
  2183. # Failed, so allow Exception to bubble up.
  2184. return False
@contextmanager
def assert_produces_warning(expected_warning=Warning, filter_level="always",
                            clear=None, check_stacklevel=True):
    """
    Context manager for running code expected to either raise a specific
    warning, or not raise any warnings. Verifies that the code raises the
    expected warning, and that it does not raise any other unexpected
    warnings. It is basically a wrapper around ``warnings.catch_warnings``.

    Parameters
    ----------
    expected_warning : {Warning, False, None}, default Warning
        The type of Exception raised. ``exception.Warning`` is the base
        class for all warnings. To check that no warning is returned,
        specify ``False`` or ``None``.
    filter_level : str, default "always"
        Specifies whether warnings are ignored, displayed, or turned
        into errors.
        Valid values are:

        * "error" - turns matching warnings into exceptions
        * "ignore" - discard the warning
        * "always" - always emit a warning
        * "default" - print the warning the first time it is generated
          from each location
        * "module" - print the warning the first time it is generated
          from each module
        * "once" - print the warning the first time it is generated

    clear : str, default None
        If not ``None`` then remove any previously raised warnings from
        the ``__warningsregistry__`` to ensure that no warning messages are
        suppressed by this context manager. If ``None`` is specified,
        the ``__warningsregistry__`` keeps track of which warnings have been
        shown, and does not show them again.
    check_stacklevel : bool, default True
        If True, displays the line that called the function containing
        the warning to show where the function is called. Otherwise, the
        line that implements the function is displayed.

    Examples
    --------
    >>> import warnings
    >>> with assert_produces_warning():
    ...     warnings.warn(UserWarning())
    ...
    >>> with assert_produces_warning(False):
    ...     warnings.warn(RuntimeWarning())
    ...
    Traceback (most recent call last):
        ...
    AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
    >>> with assert_produces_warning(UserWarning):
    ...     warnings.warn(RuntimeWarning())
    Traceback (most recent call last):
        ...
    AssertionError: Did not see expected warning of class 'UserWarning'.

    ..warn:: This is *not* thread-safe.
    """
    __tracebackhide__ = True

    with warnings.catch_warnings(record=True) as w:

        if clear is not None:
            # make sure that we are clearing these warnings
            # if they have happened before
            # to guarantee that we will catch them
            if not is_list_like(clear):
                clear = [clear]
            for m in clear:
                try:
                    # best-effort: not every module has a registry yet
                    m.__warningregistry__.clear()
                except Exception:
                    pass

        saw_warning = False
        warnings.simplefilter(filter_level)
        yield w
        extra_warnings = []

        # After the body ran, classify every recorded warning as either
        # expected (matching category) or unexpected.
        for actual_warning in w:
            if (expected_warning and issubclass(actual_warning.category,
                                                expected_warning)):
                saw_warning = True

                if check_stacklevel and issubclass(actual_warning.category,
                                                   (FutureWarning,
                                                    DeprecationWarning)):
                    from inspect import getframeinfo, stack
                    # stack()[2] is presumed to be the frame of the caller of
                    # this context manager's body -- a correct ``stacklevel``
                    # makes the warning appear to come from that file.
                    caller = getframeinfo(stack()[2][0])
                    msg = ("Warning not set with correct stacklevel. "
                           "File where warning is raised: {actual} != "
                           "{caller}. Warning message: {message}"
                           ).format(actual=actual_warning.filename,
                                    caller=caller.filename,
                                    message=actual_warning.message)
                    assert actual_warning.filename == caller.filename, msg
            else:
                # Record enough context to make the failure actionable.
                extra_warnings.append((actual_warning.category.__name__,
                                       actual_warning.message,
                                       actual_warning.filename,
                                       actual_warning.lineno))
        if expected_warning:
            msg = "Did not see expected warning of class {name!r}.".format(
                name=expected_warning.__name__)
            assert saw_warning, msg
        assert not extra_warnings, ("Caused unexpected warning(s): {extra!r}."
                                    ).format(extra=extra_warnings)
  2284. class RNGContext(object):
  2285. """
  2286. Context manager to set the numpy random number generator speed. Returns
  2287. to the original value upon exiting the context manager.
  2288. Parameters
  2289. ----------
  2290. seed : int
  2291. Seed for numpy.random.seed
  2292. Examples
  2293. --------
  2294. with RNGContext(42):
  2295. np.random.randn()
  2296. """
  2297. def __init__(self, seed):
  2298. self.seed = seed
  2299. def __enter__(self):
  2300. self.start_state = np.random.get_state()
  2301. np.random.seed(self.seed)
  2302. def __exit__(self, exc_type, exc_value, traceback):
  2303. np.random.set_state(self.start_state)
  2304. @contextmanager
  2305. def with_csv_dialect(name, **kwargs):
  2306. """
  2307. Context manager to temporarily register a CSV dialect for parsing CSV.
  2308. Parameters
  2309. ----------
  2310. name : str
  2311. The name of the dialect.
  2312. kwargs : mapping
  2313. The parameters for the dialect.
  2314. Raises
  2315. ------
  2316. ValueError : the name of the dialect conflicts with a builtin one.
  2317. See Also
  2318. --------
  2319. csv : Python's CSV library.
  2320. """
  2321. import csv
  2322. _BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
  2323. if name in _BUILTIN_DIALECTS:
  2324. raise ValueError("Cannot override builtin dialect.")
  2325. csv.register_dialect(name, **kwargs)
  2326. yield
  2327. csv.unregister_dialect(name)
  2328. @contextmanager
  2329. def use_numexpr(use, min_elements=None):
  2330. from pandas.core.computation import expressions as expr
  2331. if min_elements is None:
  2332. min_elements = expr._MIN_ELEMENTS
  2333. olduse = expr._USE_NUMEXPR
  2334. oldmin = expr._MIN_ELEMENTS
  2335. expr.set_use_numexpr(use)
  2336. expr._MIN_ELEMENTS = min_elements
  2337. yield
  2338. expr._MIN_ELEMENTS = oldmin
  2339. expr.set_use_numexpr(olduse)
  2340. def test_parallel(num_threads=2, kwargs_list=None):
  2341. """Decorator to run the same function multiple times in parallel.
  2342. Parameters
  2343. ----------
  2344. num_threads : int, optional
  2345. The number of times the function is run in parallel.
  2346. kwargs_list : list of dicts, optional
  2347. The list of kwargs to update original
  2348. function kwargs on different threads.
  2349. Notes
  2350. -----
  2351. This decorator does not pass the return value of the decorated function.
  2352. Original from scikit-image:
  2353. https://github.com/scikit-image/scikit-image/pull/1519
  2354. """
  2355. assert num_threads > 0
  2356. has_kwargs_list = kwargs_list is not None
  2357. if has_kwargs_list:
  2358. assert len(kwargs_list) == num_threads
  2359. import threading
  2360. def wrapper(func):
  2361. @wraps(func)
  2362. def inner(*args, **kwargs):
  2363. if has_kwargs_list:
  2364. update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
  2365. else:
  2366. update_kwargs = lambda i: kwargs
  2367. threads = []
  2368. for i in range(num_threads):
  2369. updated_kwargs = update_kwargs(i)
  2370. thread = threading.Thread(target=func, args=args,
  2371. kwargs=updated_kwargs)
  2372. threads.append(thread)
  2373. for thread in threads:
  2374. thread.start()
  2375. for thread in threads:
  2376. thread.join()
  2377. return inner
  2378. return wrapper
class SubclassedSeries(Series):
    """Series subclass used in tests to verify that subclass identity and
    metadata survive pandas operations."""

    # Attribute names listed here are presumably propagated to results by
    # pandas' metadata machinery -- confirm against NDFrame.__finalize__.
    _metadata = ['testattr', 'name']

    @property
    def _constructor(self):
        # Same-dimension results (slicing, arithmetic) stay this subclass.
        return SubclassedSeries

    @property
    def _constructor_expanddim(self):
        # Dimension-raising results (e.g. to_frame) use the matching
        # DataFrame subclass.
        return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
    """DataFrame subclass used in tests to verify that subclass identity and
    metadata survive pandas operations."""

    # Attribute names listed here are presumably propagated to results by
    # pandas' metadata machinery -- confirm against NDFrame.__finalize__.
    _metadata = ['testattr']

    @property
    def _constructor(self):
        # Same-dimension results stay this subclass.
        return SubclassedDataFrame

    @property
    def _constructor_sliced(self):
        # Dimension-lowering results (column/row selection) use the
        # matching Series subclass.
        return SubclassedSeries
class SubclassedSparseSeries(pd.SparseSeries):
    """SparseSeries subclass used in tests, mirroring SubclassedSeries for
    the sparse variants."""

    # Attribute names listed here are presumably propagated to results by
    # pandas' metadata machinery -- confirm against NDFrame.__finalize__.
    _metadata = ['testattr']

    @property
    def _constructor(self):
        # Same-dimension results stay this subclass.
        return SubclassedSparseSeries

    @property
    def _constructor_expanddim(self):
        # Dimension-raising results use the matching sparse frame subclass.
        return SubclassedSparseDataFrame
class SubclassedSparseDataFrame(pd.SparseDataFrame):
    """SparseDataFrame subclass used in tests, mirroring SubclassedDataFrame
    for the sparse variants."""

    # Attribute names listed here are presumably propagated to results by
    # pandas' metadata machinery -- confirm against NDFrame.__finalize__.
    _metadata = ['testattr']

    @property
    def _constructor(self):
        # Same-dimension results stay this subclass.
        return SubclassedSparseDataFrame

    @property
    def _constructor_sliced(self):
        # Dimension-lowering results use the matching sparse series subclass.
        return SubclassedSparseSeries
class SubclassedCategorical(Categorical):
    """Categorical subclass used in tests to verify subclass preservation."""

    @property
    def _constructor(self):
        # Results constructed through ``_constructor`` stay this subclass.
        return SubclassedCategorical
  2415. @contextmanager
  2416. def set_timezone(tz):
  2417. """Context manager for temporarily setting a timezone.
  2418. Parameters
  2419. ----------
  2420. tz : str
  2421. A string representing a valid timezone.
  2422. Examples
  2423. --------
  2424. >>> from datetime import datetime
  2425. >>> from dateutil.tz import tzlocal
  2426. >>> tzlocal().tzname(datetime.now())
  2427. 'IST'
  2428. >>> with set_timezone('US/Eastern'):
  2429. ... tzlocal().tzname(datetime.now())
  2430. ...
  2431. 'EDT'
  2432. """
  2433. import os
  2434. import time
  2435. def setTZ(tz):
  2436. if tz is None:
  2437. try:
  2438. del os.environ['TZ']
  2439. except KeyError:
  2440. pass
  2441. else:
  2442. os.environ['TZ'] = tz
  2443. time.tzset()
  2444. orig_tz = os.environ.get('TZ')
  2445. setTZ(tz)
  2446. try:
  2447. yield
  2448. finally:
  2449. setTZ(orig_tz)
  2450. def _make_skipna_wrapper(alternative, skipna_alternative=None):
  2451. """Create a function for calling on an array.
  2452. Parameters
  2453. ----------
  2454. alternative : function
  2455. The function to be called on the array with no NaNs.
  2456. Only used when 'skipna_alternative' is None.
  2457. skipna_alternative : function
  2458. The function to be called on the original array
  2459. Returns
  2460. -------
  2461. skipna_wrapper : function
  2462. """
  2463. if skipna_alternative:
  2464. def skipna_wrapper(x):
  2465. return skipna_alternative(x.values)
  2466. else:
  2467. def skipna_wrapper(x):
  2468. nona = x.dropna()
  2469. if len(nona) == 0:
  2470. return np.nan
  2471. return alternative(nona)
  2472. return skipna_wrapper
  2473. def convert_rows_list_to_csv_str(rows_list):
  2474. """
  2475. Convert list of CSV rows to single CSV-formatted string for current OS.
  2476. This method is used for creating expected value of to_csv() method.
  2477. Parameters
  2478. ----------
  2479. rows_list : list
  2480. The list of string. Each element represents the row of csv.
  2481. Returns
  2482. -------
  2483. expected : string
  2484. Expected output of to_csv() in current OS
  2485. """
  2486. sep = os.linesep
  2487. expected = sep.join(rows_list) + sep
  2488. return expected