# sql.py
  1. # -*- coding: utf-8 -*-
  2. """
  3. Collection of query wrappers / abstractions to both facilitate data
  4. retrieval and to reduce dependency on DB-specific API.
  5. """
  6. from __future__ import division, print_function
  7. from contextlib import contextmanager
  8. from datetime import date, datetime, time
  9. from functools import partial
  10. import re
  11. import warnings
  12. import numpy as np
  13. import pandas._libs.lib as lib
  14. from pandas.compat import (
  15. map, raise_with_traceback, string_types, text_type, zip)
  16. from pandas.core.dtypes.common import (
  17. is_datetime64tz_dtype, is_dict_like, is_list_like)
  18. from pandas.core.dtypes.dtypes import DatetimeTZDtype
  19. from pandas.core.dtypes.missing import isna
  20. from pandas.core.api import DataFrame, Series
  21. from pandas.core.base import PandasObject
  22. from pandas.core.tools.datetimes import to_datetime
class SQLAlchemyRequired(ImportError):
    """Raised when SQLAlchemy is required but not installed."""
    pass
class DatabaseError(IOError):
    """Raised when the execution of a SQL statement fails."""
    pass
# -----------------------------------------------------------------------------
# -- Helper functions

# Tri-state cache for the sqlalchemy availability probe:
# None -> not yet checked; True/False -> cached result of the import attempt.
_SQLALCHEMY_INSTALLED = None
  30. def _is_sqlalchemy_connectable(con):
  31. global _SQLALCHEMY_INSTALLED
  32. if _SQLALCHEMY_INSTALLED is None:
  33. try:
  34. import sqlalchemy
  35. _SQLALCHEMY_INSTALLED = True
  36. from distutils.version import LooseVersion
  37. ver = sqlalchemy.__version__
  38. # For sqlalchemy versions < 0.8.2, the BIGINT type is recognized
  39. # for a sqlite engine, which results in a warning when trying to
  40. # read/write a DataFrame with int64 values. (GH7433)
  41. if LooseVersion(ver) < LooseVersion('0.8.2'):
  42. from sqlalchemy import BigInteger
  43. from sqlalchemy.ext.compiler import compiles
  44. @compiles(BigInteger, 'sqlite')
  45. def compile_big_int_sqlite(type_, compiler, **kw):
  46. return 'INTEGER'
  47. except ImportError:
  48. _SQLALCHEMY_INSTALLED = False
  49. if _SQLALCHEMY_INSTALLED:
  50. import sqlalchemy
  51. return isinstance(con, sqlalchemy.engine.Connectable)
  52. else:
  53. return False
  54. def _convert_params(sql, params):
  55. """Convert SQL and params args to DBAPI2.0 compliant format."""
  56. args = [sql]
  57. if params is not None:
  58. if hasattr(params, 'keys'): # test if params is a mapping
  59. args += [params]
  60. else:
  61. args += [list(params)]
  62. return args
  63. def _process_parse_dates_argument(parse_dates):
  64. """Process parse_dates argument for read_sql functions"""
  65. # handle non-list entries for parse_dates gracefully
  66. if parse_dates is True or parse_dates is None or parse_dates is False:
  67. parse_dates = []
  68. elif not hasattr(parse_dates, '__iter__'):
  69. parse_dates = [parse_dates]
  70. return parse_dates
  71. def _handle_date_column(col, utc=None, format=None):
  72. if isinstance(format, dict):
  73. return to_datetime(col, errors='ignore', **format)
  74. else:
  75. # Allow passing of formatting string for integers
  76. # GH17855
  77. if format is None and (issubclass(col.dtype.type, np.floating) or
  78. issubclass(col.dtype.type, np.integer)):
  79. format = 's'
  80. if format in ['D', 'd', 'h', 'm', 's', 'ms', 'us', 'ns']:
  81. return to_datetime(col, errors='coerce', unit=format, utc=utc)
  82. elif is_datetime64tz_dtype(col):
  83. # coerce to UTC timezone
  84. # GH11216
  85. return to_datetime(col, utc=True)
  86. else:
  87. return to_datetime(col, errors='coerce', format=format, utc=utc)
  88. def _parse_date_columns(data_frame, parse_dates):
  89. """
  90. Force non-datetime columns to be read as such.
  91. Supports both string formatted and integer timestamp columns.
  92. """
  93. parse_dates = _process_parse_dates_argument(parse_dates)
  94. # we want to coerce datetime64_tz dtypes for now to UTC
  95. # we could in theory do a 'nice' conversion from a FixedOffset tz
  96. # GH11216
  97. for col_name, df_col in data_frame.iteritems():
  98. if is_datetime64tz_dtype(df_col) or col_name in parse_dates:
  99. try:
  100. fmt = parse_dates[col_name]
  101. except TypeError:
  102. fmt = None
  103. data_frame[col_name] = _handle_date_column(df_col, format=fmt)
  104. return data_frame
  105. def _wrap_result(data, columns, index_col=None, coerce_float=True,
  106. parse_dates=None):
  107. """Wrap result set of query in a DataFrame."""
  108. frame = DataFrame.from_records(data, columns=columns,
  109. coerce_float=coerce_float)
  110. frame = _parse_date_columns(frame, parse_dates)
  111. if index_col is not None:
  112. frame.set_index(index_col, inplace=True)
  113. return frame
  114. def execute(sql, con, cur=None, params=None):
  115. """
  116. Execute the given SQL query using the provided connection object.
  117. Parameters
  118. ----------
  119. sql : string
  120. SQL query to be executed.
  121. con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
  122. Using SQLAlchemy makes it possible to use any DB supported by the
  123. library.
  124. If a DBAPI2 object, only sqlite3 is supported.
  125. cur : deprecated, cursor is obtained from connection, default: None
  126. params : list or tuple, optional, default: None
  127. List of parameters to pass to execute method.
  128. Returns
  129. -------
  130. Results Iterable
  131. """
  132. if cur is None:
  133. pandas_sql = pandasSQL_builder(con)
  134. else:
  135. pandas_sql = pandasSQL_builder(cur, is_cursor=True)
  136. args = _convert_params(sql, params)
  137. return pandas_sql.execute(*args)
  138. # -----------------------------------------------------------------------------
  139. # -- Read and write to DataFrames
  140. def read_sql_table(table_name, con, schema=None, index_col=None,
  141. coerce_float=True, parse_dates=None, columns=None,
  142. chunksize=None):
  143. """Read SQL database table into a DataFrame.
  144. Given a table name and a SQLAlchemy connectable, returns a DataFrame.
  145. This function does not support DBAPI connections.
  146. Parameters
  147. ----------
  148. table_name : string
  149. Name of SQL table in database.
  150. con : SQLAlchemy connectable (or database string URI)
  151. SQLite DBAPI connection mode not supported.
  152. schema : string, default None
  153. Name of SQL schema in database to query (if database flavor
  154. supports this). Uses default schema if None (default).
  155. index_col : string or list of strings, optional, default: None
  156. Column(s) to set as index(MultiIndex).
  157. coerce_float : boolean, default True
  158. Attempts to convert values of non-string, non-numeric objects (like
  159. decimal.Decimal) to floating point. Can result in loss of Precision.
  160. parse_dates : list or dict, default: None
  161. - List of column names to parse as dates.
  162. - Dict of ``{column_name: format string}`` where format string is
  163. strftime compatible in case of parsing string times or is one of
  164. (D, s, ns, ms, us) in case of parsing integer timestamps.
  165. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
  166. to the keyword arguments of :func:`pandas.to_datetime`
  167. Especially useful with databases without native Datetime support,
  168. such as SQLite.
  169. columns : list, default: None
  170. List of column names to select from SQL table
  171. chunksize : int, default None
  172. If specified, returns an iterator where `chunksize` is the number of
  173. rows to include in each chunk.
  174. Returns
  175. -------
  176. DataFrame
  177. See Also
  178. --------
  179. read_sql_query : Read SQL query into a DataFrame.
  180. read_sql
  181. Notes
  182. -----
  183. Any datetime values with time zone information will be converted to UTC.
  184. """
  185. con = _engine_builder(con)
  186. if not _is_sqlalchemy_connectable(con):
  187. raise NotImplementedError("read_sql_table only supported for "
  188. "SQLAlchemy connectable.")
  189. import sqlalchemy
  190. from sqlalchemy.schema import MetaData
  191. meta = MetaData(con, schema=schema)
  192. try:
  193. meta.reflect(only=[table_name], views=True)
  194. except sqlalchemy.exc.InvalidRequestError:
  195. raise ValueError("Table {name} not found".format(name=table_name))
  196. pandas_sql = SQLDatabase(con, meta=meta)
  197. table = pandas_sql.read_table(
  198. table_name, index_col=index_col, coerce_float=coerce_float,
  199. parse_dates=parse_dates, columns=columns, chunksize=chunksize)
  200. if table is not None:
  201. return table
  202. else:
  203. raise ValueError("Table {name} not found".format(name=table_name), con)
  204. def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
  205. parse_dates=None, chunksize=None):
  206. """Read SQL query into a DataFrame.
  207. Returns a DataFrame corresponding to the result set of the query
  208. string. Optionally provide an `index_col` parameter to use one of the
  209. columns as the index, otherwise default integer index will be used.
  210. Parameters
  211. ----------
  212. sql : string SQL query or SQLAlchemy Selectable (select or text object)
  213. SQL query to be executed.
  214. con : SQLAlchemy connectable(engine/connection), database string URI,
  215. or sqlite3 DBAPI2 connection
  216. Using SQLAlchemy makes it possible to use any DB supported by that
  217. library.
  218. If a DBAPI2 object, only sqlite3 is supported.
  219. index_col : string or list of strings, optional, default: None
  220. Column(s) to set as index(MultiIndex).
  221. coerce_float : boolean, default True
  222. Attempts to convert values of non-string, non-numeric objects (like
  223. decimal.Decimal) to floating point. Useful for SQL result sets.
  224. params : list, tuple or dict, optional, default: None
  225. List of parameters to pass to execute method. The syntax used
  226. to pass parameters is database driver dependent. Check your
  227. database driver documentation for which of the five syntax styles,
  228. described in PEP 249's paramstyle, is supported.
  229. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
  230. parse_dates : list or dict, default: None
  231. - List of column names to parse as dates.
  232. - Dict of ``{column_name: format string}`` where format string is
  233. strftime compatible in case of parsing string times, or is one of
  234. (D, s, ns, ms, us) in case of parsing integer timestamps.
  235. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
  236. to the keyword arguments of :func:`pandas.to_datetime`
  237. Especially useful with databases without native Datetime support,
  238. such as SQLite.
  239. chunksize : int, default None
  240. If specified, return an iterator where `chunksize` is the number of
  241. rows to include in each chunk.
  242. Returns
  243. -------
  244. DataFrame
  245. See Also
  246. --------
  247. read_sql_table : Read SQL database table into a DataFrame.
  248. read_sql
  249. Notes
  250. -----
  251. Any datetime values with time zone information parsed via the `parse_dates`
  252. parameter will be converted to UTC.
  253. """
  254. pandas_sql = pandasSQL_builder(con)
  255. return pandas_sql.read_query(
  256. sql, index_col=index_col, params=params, coerce_float=coerce_float,
  257. parse_dates=parse_dates, chunksize=chunksize)
  258. def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
  259. parse_dates=None, columns=None, chunksize=None):
  260. """
  261. Read SQL query or database table into a DataFrame.
  262. This function is a convenience wrapper around ``read_sql_table`` and
  263. ``read_sql_query`` (for backward compatibility). It will delegate
  264. to the specific function depending on the provided input. A SQL query
  265. will be routed to ``read_sql_query``, while a database table name will
  266. be routed to ``read_sql_table``. Note that the delegated function might
  267. have more specific notes about their functionality not listed here.
  268. Parameters
  269. ----------
  270. sql : string or SQLAlchemy Selectable (select or text object)
  271. SQL query to be executed or a table name.
  272. con : SQLAlchemy connectable (engine/connection) or database string URI
  273. or DBAPI2 connection (fallback mode)
  274. Using SQLAlchemy makes it possible to use any DB supported by that
  275. library. If a DBAPI2 object, only sqlite3 is supported.
  276. index_col : string or list of strings, optional, default: None
  277. Column(s) to set as index(MultiIndex).
  278. coerce_float : boolean, default True
  279. Attempts to convert values of non-string, non-numeric objects (like
  280. decimal.Decimal) to floating point, useful for SQL result sets.
  281. params : list, tuple or dict, optional, default: None
  282. List of parameters to pass to execute method. The syntax used
  283. to pass parameters is database driver dependent. Check your
  284. database driver documentation for which of the five syntax styles,
  285. described in PEP 249's paramstyle, is supported.
  286. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
  287. parse_dates : list or dict, default: None
  288. - List of column names to parse as dates.
  289. - Dict of ``{column_name: format string}`` where format string is
  290. strftime compatible in case of parsing string times, or is one of
  291. (D, s, ns, ms, us) in case of parsing integer timestamps.
  292. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
  293. to the keyword arguments of :func:`pandas.to_datetime`
  294. Especially useful with databases without native Datetime support,
  295. such as SQLite.
  296. columns : list, default: None
  297. List of column names to select from SQL table (only used when reading
  298. a table).
  299. chunksize : int, default None
  300. If specified, return an iterator where `chunksize` is the
  301. number of rows to include in each chunk.
  302. Returns
  303. -------
  304. DataFrame
  305. See Also
  306. --------
  307. read_sql_table : Read SQL database table into a DataFrame.
  308. read_sql_query : Read SQL query into a DataFrame.
  309. """
  310. pandas_sql = pandasSQL_builder(con)
  311. if isinstance(pandas_sql, SQLiteDatabase):
  312. return pandas_sql.read_query(
  313. sql, index_col=index_col, params=params,
  314. coerce_float=coerce_float, parse_dates=parse_dates,
  315. chunksize=chunksize)
  316. try:
  317. _is_table_name = pandas_sql.has_table(sql)
  318. except Exception:
  319. # using generic exception to catch errors from sql drivers (GH24988)
  320. _is_table_name = False
  321. if _is_table_name:
  322. pandas_sql.meta.reflect(only=[sql])
  323. return pandas_sql.read_table(
  324. sql, index_col=index_col, coerce_float=coerce_float,
  325. parse_dates=parse_dates, columns=columns, chunksize=chunksize)
  326. else:
  327. return pandas_sql.read_query(
  328. sql, index_col=index_col, params=params,
  329. coerce_float=coerce_float, parse_dates=parse_dates,
  330. chunksize=chunksize)
  331. def to_sql(frame, name, con, schema=None, if_exists='fail', index=True,
  332. index_label=None, chunksize=None, dtype=None, method=None):
  333. """
  334. Write records stored in a DataFrame to a SQL database.
  335. Parameters
  336. ----------
  337. frame : DataFrame, Series
  338. name : string
  339. Name of SQL table.
  340. con : SQLAlchemy connectable(engine/connection) or database string URI
  341. or sqlite3 DBAPI2 connection
  342. Using SQLAlchemy makes it possible to use any DB supported by that
  343. library.
  344. If a DBAPI2 object, only sqlite3 is supported.
  345. schema : string, default None
  346. Name of SQL schema in database to write to (if database flavor
  347. supports this). If None, use default schema (default).
  348. if_exists : {'fail', 'replace', 'append'}, default 'fail'
  349. - fail: If table exists, do nothing.
  350. - replace: If table exists, drop it, recreate it, and insert data.
  351. - append: If table exists, insert data. Create if does not exist.
  352. index : boolean, default True
  353. Write DataFrame index as a column.
  354. index_label : string or sequence, default None
  355. Column label for index column(s). If None is given (default) and
  356. `index` is True, then the index names are used.
  357. A sequence should be given if the DataFrame uses MultiIndex.
  358. chunksize : int, default None
  359. If not None, then rows will be written in batches of this size at a
  360. time. If None, all rows will be written at once.
  361. dtype : single SQLtype or dict of column name to SQL type, default None
  362. Optional specifying the datatype for columns. The SQL type should
  363. be a SQLAlchemy type, or a string for sqlite3 fallback connection.
  364. If all columns are of the same type, one single value can be used.
  365. method : {None, 'multi', callable}, default None
  366. Controls the SQL insertion clause used:
  367. - None : Uses standard SQL ``INSERT`` clause (one per row).
  368. - 'multi': Pass multiple values in a single ``INSERT`` clause.
  369. - callable with signature ``(pd_table, conn, keys, data_iter)``.
  370. Details and a sample callable implementation can be found in the
  371. section :ref:`insert method <io.sql.method>`.
  372. .. versionadded:: 0.24.0
  373. """
  374. if if_exists not in ('fail', 'replace', 'append'):
  375. raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
  376. pandas_sql = pandasSQL_builder(con, schema=schema)
  377. if isinstance(frame, Series):
  378. frame = frame.to_frame()
  379. elif not isinstance(frame, DataFrame):
  380. raise NotImplementedError("'frame' argument should be either a "
  381. "Series or a DataFrame")
  382. pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
  383. index_label=index_label, schema=schema,
  384. chunksize=chunksize, dtype=dtype, method=method)
  385. def has_table(table_name, con, schema=None):
  386. """
  387. Check if DataBase has named table.
  388. Parameters
  389. ----------
  390. table_name: string
  391. Name of SQL table.
  392. con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
  393. Using SQLAlchemy makes it possible to use any DB supported by that
  394. library.
  395. If a DBAPI2 object, only sqlite3 is supported.
  396. schema : string, default None
  397. Name of SQL schema in database to write to (if database flavor supports
  398. this). If None, use default schema (default).
  399. Returns
  400. -------
  401. boolean
  402. """
  403. pandas_sql = pandasSQL_builder(con, schema=schema)
  404. return pandas_sql.has_table(table_name)
  405. table_exists = has_table
  406. def _engine_builder(con):
  407. """
  408. Returns a SQLAlchemy engine from a URI (if con is a string)
  409. else it just return con without modifying it.
  410. """
  411. global _SQLALCHEMY_INSTALLED
  412. if isinstance(con, string_types):
  413. try:
  414. import sqlalchemy
  415. except ImportError:
  416. _SQLALCHEMY_INSTALLED = False
  417. else:
  418. con = sqlalchemy.create_engine(con)
  419. return con
  420. return con
  421. def pandasSQL_builder(con, schema=None, meta=None,
  422. is_cursor=False):
  423. """
  424. Convenience function to return the correct PandasSQL subclass based on the
  425. provided parameters.
  426. """
  427. # When support for DBAPI connections is removed,
  428. # is_cursor should not be necessary.
  429. con = _engine_builder(con)
  430. if _is_sqlalchemy_connectable(con):
  431. return SQLDatabase(con, schema=schema, meta=meta)
  432. elif isinstance(con, string_types):
  433. raise ImportError("Using URI string without sqlalchemy installed.")
  434. else:
  435. return SQLiteDatabase(con, is_cursor=is_cursor)
  436. class SQLTable(PandasObject):
  437. """
  438. For mapping Pandas tables to SQL tables.
  439. Uses fact that table is reflected by SQLAlchemy to
  440. do better type conversions.
  441. Also holds various flags needed to avoid having to
  442. pass them between functions all the time.
  443. """
  444. # TODO: support for multiIndex
    def __init__(self, name, pandas_sql_engine, frame=None, index=True,
                 if_exists='fail', prefix='pandas', index_label=None,
                 schema=None, keys=None, dtype=None):
        # name: SQL table name; pandas_sql_engine: owning PandasSQL wrapper
        self.name = name
        self.pd_sql = pandas_sql_engine
        self.prefix = prefix
        self.frame = frame
        # resolved list of index column labels, or None when index is False
        self.index = self._index_name(index, index_label)
        self.schema = schema
        self.if_exists = if_exists
        self.keys = keys
        self.dtype = dtype

        if frame is not None:
            # We want to initialize based on a dataframe
            self.table = self._create_table_setup()
        else:
            # no data provided, read-only mode
            self.table = self.pd_sql.get_table(self.name, self.schema)

        if self.table is None:
            raise ValueError(
                "Could not init table '{name}'".format(name=name))
    def exists(self):
        """Return True if the target table is present in the database."""
        return self.pd_sql.has_table(self.name, self.schema)
  468. def sql_schema(self):
  469. from sqlalchemy.schema import CreateTable
  470. return str(CreateTable(self.table).compile(self.pd_sql.connectable))
    def _execute_create(self):
        """Create the table in the database."""
        # Inserting table into database, add to MetaData object
        self.table = self.table.tometadata(self.pd_sql.meta)
        self.table.create()
  475. def create(self):
  476. if self.exists():
  477. if self.if_exists == 'fail':
  478. raise ValueError(
  479. "Table '{name}' already exists.".format(name=self.name))
  480. elif self.if_exists == 'replace':
  481. self.pd_sql.drop_table(self.name, self.schema)
  482. self._execute_create()
  483. elif self.if_exists == 'append':
  484. pass
  485. else:
  486. raise ValueError(
  487. "'{0}' is not valid for if_exists".format(self.if_exists))
  488. else:
  489. self._execute_create()
  490. def _execute_insert(self, conn, keys, data_iter):
  491. """Execute SQL statement inserting data
  492. Parameters
  493. ----------
  494. conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
  495. keys : list of str
  496. Column names
  497. data_iter : generator of list
  498. Each item contains a list of values to be inserted
  499. """
  500. data = [dict(zip(keys, row)) for row in data_iter]
  501. conn.execute(self.table.insert(), data)
  502. def _execute_insert_multi(self, conn, keys, data_iter):
  503. """Alternative to _execute_insert for DBs support multivalue INSERT.
  504. Note: multi-value insert is usually faster for analytics DBs
  505. and tables containing a few columns
  506. but performance degrades quickly with increase of columns.
  507. """
  508. data = [dict(zip(keys, row)) for row in data_iter]
  509. conn.execute(self.table.insert(data))
    def insert_data(self):
        """Return ``(column_names, data_list)`` with the frame's data as
        column-wise object arrays ready for DBAPI insertion.

        Datetime columns become datetime.datetime objects (tz info kept for
        tz-aware data) and NaN/NaT are replaced with None.
        """
        if self.index is not None:
            temp = self.frame.copy()
            # label the index levels so reset_index yields the right columns
            temp.index.names = self.index
            try:
                temp.reset_index(inplace=True)
            except ValueError as err:
                raise ValueError(
                    "duplicate name in index/columns: {0}".format(err))
        else:
            temp = self.frame

        column_names = list(map(text_type, temp.columns))
        ncols = len(column_names)
        data_list = [None] * ncols
        # NOTE(review): iterates pandas' internal BlockManager; assumes the
        # old block API (is_datetime, mgr_locs, get_values) — verify against
        # the pandas version in use.
        blocks = temp._data.blocks

        for b in blocks:
            if b.is_datetime:
                # return datetime.datetime objects
                if b.is_datetimetz:
                    # GH 9086: Ensure we return datetimes with timezone info
                    # Need to return 2-D data; DatetimeIndex is 1D
                    d = b.values.to_pydatetime()
                    d = np.expand_dims(d, axis=0)
                else:
                    # convert to microsecond resolution for datetime.datetime
                    d = b.values.astype('M8[us]').astype(object)
            else:
                d = np.array(b.get_values(), dtype=object)

            # replace NaN with None
            if b._can_hold_na:
                mask = isna(d)
                d[mask] = None

            # scatter the block's columns back to their frame positions
            for col_loc, col in zip(b.mgr_locs, d):
                data_list[col_loc] = col

        return column_names, data_list
  545. def insert(self, chunksize=None, method=None):
  546. # set insert method
  547. if method is None:
  548. exec_insert = self._execute_insert
  549. elif method == 'multi':
  550. exec_insert = self._execute_insert_multi
  551. elif callable(method):
  552. exec_insert = partial(method, self)
  553. else:
  554. raise ValueError('Invalid parameter `method`: {}'.format(method))
  555. keys, data_list = self.insert_data()
  556. nrows = len(self.frame)
  557. if nrows == 0:
  558. return
  559. if chunksize is None:
  560. chunksize = nrows
  561. elif chunksize == 0:
  562. raise ValueError('chunksize argument should be non-zero')
  563. chunks = int(nrows / chunksize) + 1
  564. with self.pd_sql.run_transaction() as conn:
  565. for i in range(chunks):
  566. start_i = i * chunksize
  567. end_i = min((i + 1) * chunksize, nrows)
  568. if start_i >= end_i:
  569. break
  570. chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
  571. exec_insert(conn, keys, chunk_iter)
  572. def _query_iterator(self, result, chunksize, columns, coerce_float=True,
  573. parse_dates=None):
  574. """Return generator through chunked result set."""
  575. while True:
  576. data = result.fetchmany(chunksize)
  577. if not data:
  578. break
  579. else:
  580. self.frame = DataFrame.from_records(
  581. data, columns=columns, coerce_float=coerce_float)
  582. self._harmonize_columns(parse_dates=parse_dates)
  583. if self.index is not None:
  584. self.frame.set_index(self.index, inplace=True)
  585. yield self.frame
  586. def read(self, coerce_float=True, parse_dates=None, columns=None,
  587. chunksize=None):
  588. if columns is not None and len(columns) > 0:
  589. from sqlalchemy import select
  590. cols = [self.table.c[n] for n in columns]
  591. if self.index is not None:
  592. [cols.insert(0, self.table.c[idx]) for idx in self.index[::-1]]
  593. sql_select = select(cols)
  594. else:
  595. sql_select = self.table.select()
  596. result = self.pd_sql.execute(sql_select)
  597. column_names = result.keys()
  598. if chunksize is not None:
  599. return self._query_iterator(result, chunksize, column_names,
  600. coerce_float=coerce_float,
  601. parse_dates=parse_dates)
  602. else:
  603. data = result.fetchall()
  604. self.frame = DataFrame.from_records(
  605. data, columns=column_names, coerce_float=coerce_float)
  606. self._harmonize_columns(parse_dates=parse_dates)
  607. if self.index is not None:
  608. self.frame.set_index(self.index, inplace=True)
  609. return self.frame
  610. def _index_name(self, index, index_label):
  611. # for writing: index=True to include index in sql table
  612. if index is True:
  613. nlevels = self.frame.index.nlevels
  614. # if index_label is specified, set this as index name(s)
  615. if index_label is not None:
  616. if not isinstance(index_label, list):
  617. index_label = [index_label]
  618. if len(index_label) != nlevels:
  619. raise ValueError(
  620. "Length of 'index_label' should match number of "
  621. "levels, which is {0}".format(nlevels))
  622. else:
  623. return index_label
  624. # return the used column labels for the index columns
  625. if (nlevels == 1 and 'index' not in self.frame.columns and
  626. self.frame.index.name is None):
  627. return ['index']
  628. else:
  629. return [l if l is not None else "level_{0}".format(i)
  630. for i, l in enumerate(self.frame.index.names)]
  631. # for reading: index=(list of) string to specify column to set as index
  632. elif isinstance(index, string_types):
  633. return [index]
  634. elif isinstance(index, list):
  635. return index
  636. else:
  637. return None
  638. def _get_column_names_and_types(self, dtype_mapper):
  639. column_names_and_types = []
  640. if self.index is not None:
  641. for i, idx_label in enumerate(self.index):
  642. idx_type = dtype_mapper(
  643. self.frame.index._get_level_values(i))
  644. column_names_and_types.append((text_type(idx_label),
  645. idx_type, True))
  646. column_names_and_types += [
  647. (text_type(self.frame.columns[i]),
  648. dtype_mapper(self.frame.iloc[:, i]),
  649. False)
  650. for i in range(len(self.frame.columns))
  651. ]
  652. return column_names_and_types
    def _create_table_setup(self):
        """Return a new SQLAlchemy ``Table`` reflecting the frame's columns.

        Index level columns are created with ``index=True``; if
        ``self.keys`` is given, a primary-key constraint named
        ``<table>_pk`` is added over those columns.
        """
        from sqlalchemy import Table, Column, PrimaryKeyConstraint

        column_names_and_types = self._get_column_names_and_types(
            self._sqlalchemy_type
        )

        columns = [Column(name, typ, index=is_index)
                   for name, typ, is_index in column_names_and_types]

        if self.keys is not None:
            # Accept a single key name or a sequence of names.
            if not is_list_like(self.keys):
                keys = [self.keys]
            else:
                keys = self.keys
            pkc = PrimaryKeyConstraint(*keys, name=self.name + '_pk')
            columns.append(pkc)

        schema = self.schema or self.pd_sql.meta.schema

        # At this point, attach to new metadata, only attach to self.meta
        # once table is created.
        from sqlalchemy.schema import MetaData
        meta = MetaData(self.pd_sql, schema=schema)

        return Table(self.name, meta, *columns, schema=schema)
    def _harmonize_columns(self, parse_dates=None):
        """
        Make the DataFrame's column types align with the SQL table
        column types.

        Need to work around limited NA value support. Floats are always
        fine, ints must always be floats if there are Null values.
        Booleans are hard because converting bool column with None replaces
        all Nones with false. Therefore only convert bool if there are no
        NA values.
        Datetimes should already be converted to np.datetime64 if supported,
        but here we also force conversion if required.
        """
        parse_dates = _process_parse_dates_argument(parse_dates)

        for sql_col in self.table.columns:
            col_name = sql_col.name
            try:
                df_col = self.frame[col_name]

                # Handle date parsing upfront; don't try to convert columns
                # twice
                if col_name in parse_dates:
                    try:
                        fmt = parse_dates[col_name]
                    except TypeError:
                        # parse_dates is a list, not a dict: no format given.
                        fmt = None
                    self.frame[col_name] = _handle_date_column(
                        df_col, format=fmt)
                    continue

                # the type the dataframe column should have
                col_type = self._get_dtype(sql_col.type)

                if (col_type is datetime or col_type is date or
                        col_type is DatetimeTZDtype):
                    # Convert tz-aware Datetime SQL columns to UTC
                    utc = col_type is DatetimeTZDtype
                    self.frame[col_name] = _handle_date_column(df_col, utc=utc)
                elif col_type is float:
                    # floats support NA, can always convert!
                    self.frame[col_name] = df_col.astype(col_type, copy=False)
                elif len(df_col) == df_col.count():
                    # No NA values, can convert ints and bools
                    if col_type is np.dtype('int64') or col_type is bool:
                        self.frame[col_name] = df_col.astype(
                            col_type, copy=False)
            except KeyError:
                pass  # this column not in results
  717. def _sqlalchemy_type(self, col):
  718. dtype = self.dtype or {}
  719. if col.name in dtype:
  720. return self.dtype[col.name]
  721. # Infer type of column, while ignoring missing values.
  722. # Needed for inserting typed data containing NULLs, GH 8778.
  723. col_type = lib.infer_dtype(col, skipna=True)
  724. from sqlalchemy.types import (BigInteger, Integer, Float,
  725. Text, Boolean,
  726. DateTime, Date, Time, TIMESTAMP)
  727. if col_type == 'datetime64' or col_type == 'datetime':
  728. # GH 9086: TIMESTAMP is the suggested type if the column contains
  729. # timezone information
  730. try:
  731. if col.dt.tz is not None:
  732. return TIMESTAMP(timezone=True)
  733. except AttributeError:
  734. # The column is actually a DatetimeIndex
  735. if col.tz is not None:
  736. return TIMESTAMP(timezone=True)
  737. return DateTime
  738. if col_type == 'timedelta64':
  739. warnings.warn("the 'timedelta' type is not supported, and will be "
  740. "written as integer values (ns frequency) to the "
  741. "database.", UserWarning, stacklevel=8)
  742. return BigInteger
  743. elif col_type == 'floating':
  744. if col.dtype == 'float32':
  745. return Float(precision=23)
  746. else:
  747. return Float(precision=53)
  748. elif col_type == 'integer':
  749. if col.dtype == 'int32':
  750. return Integer
  751. else:
  752. return BigInteger
  753. elif col_type == 'boolean':
  754. return Boolean
  755. elif col_type == 'date':
  756. return Date
  757. elif col_type == 'time':
  758. return Time
  759. elif col_type == 'complex':
  760. raise ValueError('Complex datatypes not supported')
  761. return Text
  762. def _get_dtype(self, sqltype):
  763. from sqlalchemy.types import (Integer, Float, Boolean, DateTime,
  764. Date, TIMESTAMP)
  765. if isinstance(sqltype, Float):
  766. return float
  767. elif isinstance(sqltype, Integer):
  768. # TODO: Refine integer size.
  769. return np.dtype('int64')
  770. elif isinstance(sqltype, TIMESTAMP):
  771. # we have a timezone capable type
  772. if not sqltype.timezone:
  773. return datetime
  774. return DatetimeTZDtype
  775. elif isinstance(sqltype, DateTime):
  776. # Caution: np.datetime64 is also a subclass of np.number.
  777. return datetime
  778. elif isinstance(sqltype, Date):
  779. return date
  780. elif isinstance(sqltype, Boolean):
  781. return bool
  782. return object
  783. class PandasSQL(PandasObject):
  784. """
  785. Subclasses Should define read_sql and to_sql.
  786. """
  787. def read_sql(self, *args, **kwargs):
  788. raise ValueError("PandasSQL must be created with an SQLAlchemy "
  789. "connectable or sqlite connection")
  790. def to_sql(self, *args, **kwargs):
  791. raise ValueError("PandasSQL must be created with an SQLAlchemy "
  792. "connectable or sqlite connection")
class SQLDatabase(PandasSQL):
    """
    This class enables conversion between DataFrame and SQL databases
    using SQLAlchemy to handle DataBase abstraction.

    Parameters
    ----------
    engine : SQLAlchemy connectable
        Connectable to connect with the database. Using SQLAlchemy makes it
        possible to use any DB supported by that library.
    schema : string, default None
        Name of SQL schema in database to write to (if database flavor
        supports this). If None, use default schema (default).
    meta : SQLAlchemy MetaData object, default None
        If provided, this MetaData object is used instead of a newly
        created. This allows to specify database flavor specific
        arguments in the MetaData object.
    """

    def __init__(self, engine, schema=None, meta=None):
        self.connectable = engine
        if not meta:
            from sqlalchemy.schema import MetaData
            meta = MetaData(self.connectable, schema=schema)
        # Shared MetaData used for reflection, creation and dropping.
        self.meta = meta

    @contextmanager
    def run_transaction(self):
        """Yield something with an ``execute`` method inside a transaction."""
        with self.connectable.begin() as tx:
            if hasattr(tx, 'execute'):
                yield tx
            else:
                # `tx` is a bare Transaction object (engine-level begin);
                # execute through the connectable, still inside `tx`.
                yield self.connectable

    def execute(self, *args, **kwargs):
        """Simple passthrough to SQLAlchemy connectable"""
        return self.connectable.execute(*args, **kwargs)

    def read_table(self, table_name, index_col=None, coerce_float=True,
                   parse_dates=None, columns=None, schema=None,
                   chunksize=None):
        """Read SQL database table into a DataFrame.

        Parameters
        ----------
        table_name : string
            Name of SQL table in database.
        index_col : string, optional, default: None
            Column to set as index.
        coerce_float : boolean, default True
            Attempts to convert values of non-string, non-numeric objects
            (like decimal.Decimal) to floating point. This can result in
            loss of precision.
        parse_dates : list or dict, default: None
            - List of column names to parse as dates.
            - Dict of ``{column_name: format string}`` where format string is
              strftime compatible in case of parsing string times, or is one of
              (D, s, ns, ms, us) in case of parsing integer timestamps.
            - Dict of ``{column_name: arg}``, where the arg corresponds
              to the keyword arguments of :func:`pandas.to_datetime`.
              Especially useful with databases without native Datetime support,
              such as SQLite.
        columns : list, default: None
            List of column names to select from SQL table.
        schema : string, default None
            Name of SQL schema in database to query (if database flavor
            supports this). If specified, this overwrites the default
            schema of the SQL database object.
        chunksize : int, default None
            If specified, return an iterator where `chunksize` is the number
            of rows to include in each chunk.

        Returns
        -------
        DataFrame

        See Also
        --------
        pandas.read_sql_table
        SQLDatabase.read_query
        """
        table = SQLTable(table_name, self, index=index_col, schema=schema)
        return table.read(coerce_float=coerce_float,
                          parse_dates=parse_dates, columns=columns,
                          chunksize=chunksize)

    @staticmethod
    def _query_iterator(result, chunksize, columns, index_col=None,
                        coerce_float=True, parse_dates=None):
        """Return generator through chunked result set"""
        while True:
            data = result.fetchmany(chunksize)
            if not data:
                break
            else:
                yield _wrap_result(data, columns, index_col=index_col,
                                   coerce_float=coerce_float,
                                   parse_dates=parse_dates)

    def read_query(self, sql, index_col=None, coerce_float=True,
                   parse_dates=None, params=None, chunksize=None):
        """Read SQL query into a DataFrame.

        Parameters
        ----------
        sql : string
            SQL query to be executed.
        index_col : string, optional, default: None
            Column name to use as index for the returned DataFrame object.
        coerce_float : boolean, default True
            Attempt to convert values of non-string, non-numeric objects (like
            decimal.Decimal) to floating point, useful for SQL result sets.
        params : list, tuple or dict, optional, default: None
            List of parameters to pass to execute method. The syntax used
            to pass parameters is database driver dependent. Check your
            database driver documentation for which of the five syntax styles,
            described in PEP 249's paramstyle, is supported.
            Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
        parse_dates : list or dict, default: None
            - List of column names to parse as dates.
            - Dict of ``{column_name: format string}`` where format string is
              strftime compatible in case of parsing string times, or is one of
              (D, s, ns, ms, us) in case of parsing integer timestamps.
            - Dict of ``{column_name: arg dict}``, where the arg dict
              corresponds to the keyword arguments of
              :func:`pandas.to_datetime` Especially useful with databases
              without native Datetime support, such as SQLite.
        chunksize : int, default None
            If specified, return an iterator where `chunksize` is the number
            of rows to include in each chunk.

        Returns
        -------
        DataFrame

        See Also
        --------
        read_sql_table : Read SQL database table into a DataFrame.
        read_sql
        """
        args = _convert_params(sql, params)

        result = self.execute(*args)
        columns = result.keys()

        if chunksize is not None:
            return self._query_iterator(result, chunksize, columns,
                                        index_col=index_col,
                                        coerce_float=coerce_float,
                                        parse_dates=parse_dates)
        else:
            data = result.fetchall()
            frame = _wrap_result(data, columns, index_col=index_col,
                                 coerce_float=coerce_float,
                                 parse_dates=parse_dates)
            return frame

    # ``read_sql`` is an alias kept for the PandasSQL interface.
    read_sql = read_query

    def to_sql(self, frame, name, if_exists='fail', index=True,
               index_label=None, schema=None, chunksize=None, dtype=None,
               method=None):
        """
        Write records stored in a DataFrame to a SQL database.

        Parameters
        ----------
        frame : DataFrame
        name : string
            Name of SQL table.
        if_exists : {'fail', 'replace', 'append'}, default 'fail'
            - fail: If table exists, do nothing.
            - replace: If table exists, drop it, recreate it, and insert data.
            - append: If table exists, insert data. Create if does not exist.
        index : boolean, default True
            Write DataFrame index as a column.
        index_label : string or sequence, default None
            Column label for index column(s). If None is given (default) and
            `index` is True, then the index names are used.
            A sequence should be given if the DataFrame uses MultiIndex.
        schema : string, default None
            Name of SQL schema in database to write to (if database flavor
            supports this). If specified, this overwrites the default
            schema of the SQLDatabase object.
        chunksize : int, default None
            If not None, then rows will be written in batches of this size at a
            time. If None, all rows will be written at once.
        dtype : single type or dict of column name to SQL type, default None
            Optional specifying the datatype for columns. The SQL type should
            be a SQLAlchemy type. If all columns are of the same type, one
            single value can be used.
        method : {None, 'multi', callable}, default None
            Controls the SQL insertion clause used:

            * None : Uses standard SQL ``INSERT`` clause (one per row).
            * 'multi': Pass multiple values in a single ``INSERT`` clause.
            * callable with signature ``(pd_table, conn, keys, data_iter)``.

            Details and a sample callable implementation can be found in the
            section :ref:`insert method <io.sql.method>`.

            .. versionadded:: 0.24.0
        """
        # A scalar dtype applies to every column of the frame.
        if dtype and not is_dict_like(dtype):
            dtype = {col_name: dtype for col_name in frame}

        if dtype is not None:
            # Validate early that every supplied type is a SQLAlchemy type.
            from sqlalchemy.types import to_instance, TypeEngine
            for col, my_type in dtype.items():
                if not isinstance(to_instance(my_type), TypeEngine):
                    raise ValueError('The type of {column} is not a '
                                     'SQLAlchemy type '.format(column=col))

        table = SQLTable(name, self, frame=frame, index=index,
                         if_exists=if_exists, index_label=index_label,
                         schema=schema, dtype=dtype)
        table.create()
        table.insert(chunksize, method=method)
        if (not name.isdigit() and not name.islower()):
            # check for potentially case sensitivity issues (GH7815)
            # Only check when name is not a number and name is not lower case
            engine = self.connectable.engine
            with self.connectable.connect() as conn:
                table_names = engine.table_names(
                    schema=schema or self.meta.schema,
                    connection=conn,
                )
            if name not in table_names:
                msg = (
                    "The provided table name '{0}' is not found exactly as "
                    "such in the database after writing the table, possibly "
                    "due to case sensitivity issues. Consider using lower "
                    "case table names."
                ).format(name)
                warnings.warn(msg, UserWarning)

    @property
    def tables(self):
        # All tables currently known to the shared MetaData object.
        return self.meta.tables

    def has_table(self, name, schema=None):
        """Return True if the table exists in the given (or default) schema."""
        return self.connectable.run_callable(
            self.connectable.dialect.has_table,
            name,
            schema or self.meta.schema,
        )

    def get_table(self, table_name, schema=None):
        """Return the reflected Table object for ``table_name``.

        NOTE(review): if the table has not been reflected into
        ``self.meta``, ``tbl`` is None and the loop below raises
        AttributeError — confirm callers reflect first (see drop_table).
        """
        schema = schema or self.meta.schema
        if schema:
            tbl = self.meta.tables.get('.'.join([schema, table_name]))
        else:
            tbl = self.meta.tables.get(table_name)

        # Avoid casting double-precision floats into decimals
        from sqlalchemy import Numeric
        for column in tbl.columns:
            if isinstance(column.type, Numeric):
                column.type.asdecimal = False

        return tbl

    def drop_table(self, table_name, schema=None):
        """Drop the table if it exists, then clear the cached metadata."""
        schema = schema or self.meta.schema
        if self.has_table(table_name, schema):
            # Reflect first so get_table can find the Table object.
            self.meta.reflect(only=[table_name], schema=schema)
            self.get_table(table_name, schema).drop()
            self.meta.clear()

    def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
        """Return the CREATE TABLE statement for ``frame`` as a string."""
        table = SQLTable(table_name, self, frame=frame, index=False, keys=keys,
                         dtype=dtype)
        return str(table.sql_schema())
# ---- SQL without SQLAlchemy ---
# sqlite-specific sql strings and handler class
# dictionary used for readability purposes
# Maps pandas-inferred dtype names to SQLite column type names; any
# inferred type not listed here falls back to 'string' (TEXT) in
# SQLiteTable._sql_type_name. Note booleans are stored as INTEGER since
# SQLite has no native boolean type.
_SQL_TYPES = {
    'string': 'TEXT',
    'floating': 'REAL',
    'integer': 'INTEGER',
    'datetime': 'TIMESTAMP',
    'date': 'DATE',
    'time': 'TIME',
    'boolean': 'INTEGER',
}
  1048. def _get_unicode_name(name):
  1049. try:
  1050. uname = text_type(name).encode("utf-8", "strict").decode("utf-8")
  1051. except UnicodeError:
  1052. raise ValueError(
  1053. "Cannot convert identifier to UTF-8: '{name}'".format(name=name))
  1054. return uname
  1055. def _get_valid_sqlite_name(name):
  1056. # See http://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
  1057. # -for-sqlite-table-column-names-in-python
  1058. # Ensure the string can be encoded as UTF-8.
  1059. # Ensure the string does not include any NUL characters.
  1060. # Replace all " with "".
  1061. # Wrap the entire thing in double quotes.
  1062. uname = _get_unicode_name(name)
  1063. if not len(uname):
  1064. raise ValueError("Empty table or column name specified")
  1065. nul_index = uname.find("\x00")
  1066. if nul_index >= 0:
  1067. raise ValueError('SQLite identifier cannot contain NULs')
  1068. return '"' + uname.replace('"', '""') + '"'
# Warning emitted when a frame written to SQLite has column names
# containing whitespace (see SQLiteTable._create_table_setup).
_SAFE_NAMES_WARNING = ("The spaces in these column names will not be changed. "
                       "In pandas versions < 0.14, spaces were converted to "
                       "underscores.")
  1072. class SQLiteTable(SQLTable):
  1073. """
  1074. Patch the SQLTable for fallback support.
  1075. Instead of a table variable just use the Create Table statement.
  1076. """
  1077. def __init__(self, *args, **kwargs):
  1078. # GH 8341
  1079. # register an adapter callable for datetime.time object
  1080. import sqlite3
  1081. # this will transform time(12,34,56,789) into '12:34:56.000789'
  1082. # (this is what sqlalchemy does)
  1083. sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f"))
  1084. super(SQLiteTable, self).__init__(*args, **kwargs)
  1085. def sql_schema(self):
  1086. return str(";\n".join(self.table))
  1087. def _execute_create(self):
  1088. with self.pd_sql.run_transaction() as conn:
  1089. for stmt in self.table:
  1090. conn.execute(stmt)
  1091. def insert_statement(self):
  1092. names = list(map(text_type, self.frame.columns))
  1093. wld = '?' # wildcard char
  1094. escape = _get_valid_sqlite_name
  1095. if self.index is not None:
  1096. [names.insert(0, idx) for idx in self.index[::-1]]
  1097. bracketed_names = [escape(column) for column in names]
  1098. col_names = ','.join(bracketed_names)
  1099. wildcards = ','.join([wld] * len(names))
  1100. insert_statement = \
  1101. u'INSERT INTO {table} ({columns}) VALUES ({wld})'.format(
  1102. table=escape(self.name), columns=col_names, wld=wildcards)
  1103. return insert_statement
  1104. def _execute_insert(self, conn, keys, data_iter):
  1105. data_list = list(data_iter)
  1106. conn.executemany(self.insert_statement(), data_list)
  1107. def _create_table_setup(self):
  1108. """
  1109. Return a list of SQL statements that creates a table reflecting the
  1110. structure of a DataFrame. The first entry will be a CREATE TABLE
  1111. statement while the rest will be CREATE INDEX statements.
  1112. """
  1113. column_names_and_types = self._get_column_names_and_types(
  1114. self._sql_type_name
  1115. )
  1116. pat = re.compile(r'\s+')
  1117. column_names = [col_name for col_name, _, _ in column_names_and_types]
  1118. if any(map(pat.search, column_names)):
  1119. warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
  1120. escape = _get_valid_sqlite_name
  1121. create_tbl_stmts = [escape(cname) + ' ' + ctype
  1122. for cname, ctype, _ in column_names_and_types]
  1123. if self.keys is not None and len(self.keys):
  1124. if not is_list_like(self.keys):
  1125. keys = [self.keys]
  1126. else:
  1127. keys = self.keys
  1128. cnames_br = ", ".join(escape(c) for c in keys)
  1129. create_tbl_stmts.append(
  1130. "CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})".format(
  1131. tbl=self.name, cnames_br=cnames_br))
  1132. create_stmts = ["CREATE TABLE " + escape(self.name) + " (\n" +
  1133. ',\n '.join(create_tbl_stmts) + "\n)"]
  1134. ix_cols = [cname for cname, _, is_index in column_names_and_types
  1135. if is_index]
  1136. if len(ix_cols):
  1137. cnames = "_".join(ix_cols)
  1138. cnames_br = ",".join(escape(c) for c in ix_cols)
  1139. create_stmts.append(
  1140. "CREATE INDEX " + escape("ix_" + self.name + "_" + cnames) +
  1141. "ON " + escape(self.name) + " (" + cnames_br + ")")
  1142. return create_stmts
  1143. def _sql_type_name(self, col):
  1144. dtype = self.dtype or {}
  1145. if col.name in dtype:
  1146. return dtype[col.name]
  1147. # Infer type of column, while ignoring missing values.
  1148. # Needed for inserting typed data containing NULLs, GH 8778.
  1149. col_type = lib.infer_dtype(col, skipna=True)
  1150. if col_type == 'timedelta64':
  1151. warnings.warn("the 'timedelta' type is not supported, and will be "
  1152. "written as integer values (ns frequency) to the "
  1153. "database.", UserWarning, stacklevel=8)
  1154. col_type = "integer"
  1155. elif col_type == "datetime64":
  1156. col_type = "datetime"
  1157. elif col_type == "empty":
  1158. col_type = "string"
  1159. elif col_type == "complex":
  1160. raise ValueError('Complex datatypes not supported')
  1161. if col_type not in _SQL_TYPES:
  1162. col_type = "string"
  1163. return _SQL_TYPES[col_type]
  1164. class SQLiteDatabase(PandasSQL):
  1165. """
  1166. Version of SQLDatabase to support SQLite connections (fallback without
  1167. SQLAlchemy). This should only be used internally.
  1168. Parameters
  1169. ----------
  1170. con : sqlite connection object
  1171. """
  1172. def __init__(self, con, is_cursor=False):
  1173. self.is_cursor = is_cursor
  1174. self.con = con
  1175. @contextmanager
  1176. def run_transaction(self):
  1177. cur = self.con.cursor()
  1178. try:
  1179. yield cur
  1180. self.con.commit()
  1181. except Exception:
  1182. self.con.rollback()
  1183. raise
  1184. finally:
  1185. cur.close()
  1186. def execute(self, *args, **kwargs):
  1187. if self.is_cursor:
  1188. cur = self.con
  1189. else:
  1190. cur = self.con.cursor()
  1191. try:
  1192. if kwargs:
  1193. cur.execute(*args, **kwargs)
  1194. else:
  1195. cur.execute(*args)
  1196. return cur
  1197. except Exception as exc:
  1198. try:
  1199. self.con.rollback()
  1200. except Exception: # pragma: no cover
  1201. ex = DatabaseError(
  1202. "Execution failed on sql: {sql}\n{exc}\nunable "
  1203. "to rollback".format(sql=args[0], exc=exc))
  1204. raise_with_traceback(ex)
  1205. ex = DatabaseError(
  1206. "Execution failed on sql '{sql}': {exc}".format(
  1207. sql=args[0], exc=exc))
  1208. raise_with_traceback(ex)
  1209. @staticmethod
  1210. def _query_iterator(cursor, chunksize, columns, index_col=None,
  1211. coerce_float=True, parse_dates=None):
  1212. """Return generator through chunked result set"""
  1213. while True:
  1214. data = cursor.fetchmany(chunksize)
  1215. if type(data) == tuple:
  1216. data = list(data)
  1217. if not data:
  1218. cursor.close()
  1219. break
  1220. else:
  1221. yield _wrap_result(data, columns, index_col=index_col,
  1222. coerce_float=coerce_float,
  1223. parse_dates=parse_dates)
  1224. def read_query(self, sql, index_col=None, coerce_float=True, params=None,
  1225. parse_dates=None, chunksize=None):
  1226. args = _convert_params(sql, params)
  1227. cursor = self.execute(*args)
  1228. columns = [col_desc[0] for col_desc in cursor.description]
  1229. if chunksize is not None:
  1230. return self._query_iterator(cursor, chunksize, columns,
  1231. index_col=index_col,
  1232. coerce_float=coerce_float,
  1233. parse_dates=parse_dates)
  1234. else:
  1235. data = self._fetchall_as_list(cursor)
  1236. cursor.close()
  1237. frame = _wrap_result(data, columns, index_col=index_col,
  1238. coerce_float=coerce_float,
  1239. parse_dates=parse_dates)
  1240. return frame
  1241. def _fetchall_as_list(self, cur):
  1242. result = cur.fetchall()
  1243. if not isinstance(result, list):
  1244. result = list(result)
  1245. return result
  1246. def to_sql(self, frame, name, if_exists='fail', index=True,
  1247. index_label=None, schema=None, chunksize=None, dtype=None,
  1248. method=None):
  1249. """
  1250. Write records stored in a DataFrame to a SQL database.
  1251. Parameters
  1252. ----------
  1253. frame: DataFrame
  1254. name: string
  1255. Name of SQL table.
  1256. if_exists: {'fail', 'replace', 'append'}, default 'fail'
  1257. fail: If table exists, do nothing.
  1258. replace: If table exists, drop it, recreate it, and insert data.
  1259. append: If table exists, insert data. Create if it does not exist.
  1260. index : boolean, default True
  1261. Write DataFrame index as a column
  1262. index_label : string or sequence, default None
  1263. Column label for index column(s). If None is given (default) and
  1264. `index` is True, then the index names are used.
  1265. A sequence should be given if the DataFrame uses MultiIndex.
  1266. schema : string, default None
  1267. Ignored parameter included for compatibility with SQLAlchemy
  1268. version of ``to_sql``.
  1269. chunksize : int, default None
  1270. If not None, then rows will be written in batches of this
  1271. size at a time. If None, all rows will be written at once.
  1272. dtype : single type or dict of column name to SQL type, default None
  1273. Optional specifying the datatype for columns. The SQL type should
  1274. be a string. If all columns are of the same type, one single value
  1275. can be used.
  1276. method : {None, 'multi', callable}, default None
  1277. Controls the SQL insertion clause used:
  1278. * None : Uses standard SQL ``INSERT`` clause (one per row).
  1279. * 'multi': Pass multiple values in a single ``INSERT`` clause.
  1280. * callable with signature ``(pd_table, conn, keys, data_iter)``.
  1281. Details and a sample callable implementation can be found in the
  1282. section :ref:`insert method <io.sql.method>`.
  1283. .. versionadded:: 0.24.0
  1284. """
  1285. if dtype and not is_dict_like(dtype):
  1286. dtype = {col_name: dtype for col_name in frame}
  1287. if dtype is not None:
  1288. for col, my_type in dtype.items():
  1289. if not isinstance(my_type, str):
  1290. raise ValueError('{column} ({type!s}) not a string'.format(
  1291. column=col, type=my_type))
  1292. table = SQLiteTable(name, self, frame=frame, index=index,
  1293. if_exists=if_exists, index_label=index_label,
  1294. dtype=dtype)
  1295. table.create()
  1296. table.insert(chunksize, method)
  1297. def has_table(self, name, schema=None):
  1298. # TODO(wesm): unused?
  1299. # escape = _get_valid_sqlite_name
  1300. # esc_name = escape(name)
  1301. wld = '?'
  1302. query = ("SELECT name FROM sqlite_master "
  1303. "WHERE type='table' AND name={wld};").format(wld=wld)
  1304. return len(self.execute(query, [name, ]).fetchall()) > 0
  1305. def get_table(self, table_name, schema=None):
  1306. return None # not supported in fallback mode
  1307. def drop_table(self, name, schema=None):
  1308. drop_sql = "DROP TABLE {name}".format(
  1309. name=_get_valid_sqlite_name(name))
  1310. self.execute(drop_sql)
  1311. def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
  1312. table = SQLiteTable(table_name, self, frame=frame, index=False,
  1313. keys=keys, dtype=dtype)
  1314. return str(table.sql_schema())
  1315. def get_schema(frame, name, keys=None, con=None, dtype=None):
  1316. """
  1317. Get the SQL db table schema for the given frame.
  1318. Parameters
  1319. ----------
  1320. frame : DataFrame
  1321. name : string
  1322. name of SQL table
  1323. keys : string or sequence, default: None
  1324. columns to use a primary key
  1325. con: an open SQL database connection object or a SQLAlchemy connectable
  1326. Using SQLAlchemy makes it possible to use any DB supported by that
  1327. library, default: None
  1328. If a DBAPI2 object, only sqlite3 is supported.
  1329. dtype : dict of column name to SQL type, default None
  1330. Optional specifying the datatype for columns. The SQL type should
  1331. be a SQLAlchemy type, or a string for sqlite3 fallback connection.
  1332. """
  1333. pandas_sql = pandasSQL_builder(con=con)
  1334. return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)