base.py 71 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064
  1. # mssql/base.py
  2. # Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
  3. # <see AUTHORS file>
  4. #
  5. # This module is part of SQLAlchemy and is released under
  6. # the MIT License: http://www.opensource.org/licenses/mit-license.php
  7. """
  8. .. dialect:: mssql
  9. :name: Microsoft SQL Server
  10. Auto Increment Behavior
  11. -----------------------
  12. SQL Server provides so-called "auto incrementing" behavior using the
  13. ``IDENTITY`` construct, which can be placed on an integer primary key.
  14. SQLAlchemy considers ``IDENTITY`` within its default "autoincrement" behavior,
  15. described at :paramref:`.Column.autoincrement`; this means
  16. that by default, the first integer primary key column in a :class:`.Table`
  17. will be considered to be the identity column and will generate DDL as such::
  18. from sqlalchemy import Table, MetaData, Column, Integer
  19. m = MetaData()
  20. t = Table('t', m,
  21. Column('id', Integer, primary_key=True),
  22. Column('x', Integer))
  23. m.create_all(engine)
  24. The above example will generate DDL as:
  25. .. sourcecode:: sql
  26. CREATE TABLE t (
  27. id INTEGER NOT NULL IDENTITY(1,1),
  28. x INTEGER NULL,
  29. PRIMARY KEY (id)
  30. )
  31. For the case where this default generation of ``IDENTITY`` is not desired,
  32. specify ``autoincrement=False`` on all integer primary key columns::
  33. m = MetaData()
  34. t = Table('t', m,
  35. Column('id', Integer, primary_key=True, autoincrement=False),
  36. Column('x', Integer))
  37. m.create_all(engine)
  38. .. note::
  39. An INSERT statement which refers to an explicit value for such
  40. a column is prohibited by SQL Server, however SQLAlchemy will detect this
  41. and modify the ``IDENTITY_INSERT`` flag accordingly at statement execution
  42. time. As this is not a high performing process, care should be taken to
  43. set the ``autoincrement`` flag appropriately for columns that will not
  44. actually require IDENTITY behavior.
  45. Controlling "Start" and "Increment"
  46. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  47. Specific control over the parameters of the ``IDENTITY`` value is supported
  48. using the :class:`.schema.Sequence` object. While this object normally
  49. represents an explicit "sequence" for supporting backends, on SQL Server it is
  50. re-purposed to specify behavior regarding the identity column, including
  51. support of the "start" and "increment" values::
  52. from sqlalchemy import Table, Integer, Sequence, Column
  53. Table('test', metadata,
  54. Column('id', Integer,
  55. Sequence('blah', start=100, increment=10),
  56. primary_key=True),
  57. Column('name', String(20))
  58. ).create(some_engine)
  59. would yield:
  60. .. sourcecode:: sql
  61. CREATE TABLE test (
  62. id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
  63. name VARCHAR(20) NULL,
  64. )
  65. Note that the ``start`` and ``increment`` values for sequences are
  66. optional and will default to 1,1.
  67. INSERT behavior
  68. ^^^^^^^^^^^^^^^^
  69. Handling of the ``IDENTITY`` column at INSERT time involves two key
  70. techniques. The most common is being able to fetch the "last inserted value"
  71. for a given ``IDENTITY`` column, a process which SQLAlchemy performs
  72. implicitly in many cases, most importantly within the ORM.
  73. The process for fetching this value has several variants:
  74. * In the vast majority of cases, RETURNING is used in conjunction with INSERT
  75. statements on SQL Server in order to get newly generated primary key values:
  76. .. sourcecode:: sql
  77. INSERT INTO t (x) OUTPUT inserted.id VALUES (?)
  78. * When RETURNING is not available or has been disabled via
  79. ``implicit_returning=False``, either the ``scope_identity()`` function or
  80. the ``@@identity`` variable is used; behavior varies by backend:
  81. * when using PyODBC, the phrase ``; select scope_identity()`` will be
  82. appended to the end of the INSERT statement; a second result set will be
  83. fetched in order to receive the value. Given a table as::
  84. t = Table('t', m, Column('id', Integer, primary_key=True),
  85. Column('x', Integer),
  86. implicit_returning=False)
  87. an INSERT will look like:
  88. .. sourcecode:: sql
  89. INSERT INTO t (x) VALUES (?); select scope_identity()
  90. * Other dialects such as pymssql will call upon
  91. ``SELECT scope_identity() AS lastrowid`` subsequent to an INSERT
  92. statement. If the flag ``use_scope_identity=False`` is passed to
  93. :func:`.create_engine`, the statement ``SELECT @@identity AS lastrowid``
  94. is used instead.
  95. A table that contains an ``IDENTITY`` column will prohibit an INSERT statement
  96. that refers to the identity column explicitly. The SQLAlchemy dialect will
  97. detect when an INSERT construct, created using a core :func:`.insert`
  98. construct (not a plain string SQL), refers to the identity column, and
  99. in this case will emit ``SET IDENTITY_INSERT ON`` prior to the insert
  100. statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the
  101. execution. Given this example::
  102. m = MetaData()
  103. t = Table('t', m, Column('id', Integer, primary_key=True),
  104. Column('x', Integer))
  105. m.create_all(engine)
  106. engine.execute(t.insert(), {'id': 1, 'x':1}, {'id':2, 'x':2})
  107. The above column will be created with IDENTITY, however the INSERT statement
  108. we emit is specifying explicit values. In the echo output we can see
  109. how SQLAlchemy handles this:
  110. .. sourcecode:: sql
  111. CREATE TABLE t (
  112. id INTEGER NOT NULL IDENTITY(1,1),
  113. x INTEGER NULL,
  114. PRIMARY KEY (id)
  115. )
  116. COMMIT
  117. SET IDENTITY_INSERT t ON
  118. INSERT INTO t (id, x) VALUES (?, ?)
  119. ((1, 1), (2, 2))
  120. SET IDENTITY_INSERT t OFF
  121. COMMIT
  122. This
  123. is an auxiliary use case suitable for testing and bulk insert scenarios.
  124. MAX on VARCHAR / NVARCHAR
  125. -------------------------
  126. SQL Server supports the special string "MAX" within the
  127. :class:`.sqltypes.VARCHAR` and :class:`.sqltypes.NVARCHAR` datatypes,
  128. to indicate "maximum length possible". The dialect currently handles this as
  129. a length of "None" in the base type, rather than supplying a
  130. dialect-specific version of these types, so that a base type
  131. specified such as ``VARCHAR(None)`` can assume "unlengthed" behavior on
  132. more than one backend without using dialect-specific types.
  133. To build a SQL Server VARCHAR or NVARCHAR with MAX length, use None::
  134. my_table = Table(
  135. 'my_table', metadata,
  136. Column('my_data', VARCHAR(None)),
  137. Column('my_n_data', NVARCHAR(None))
  138. )
  139. Collation Support
  140. -----------------
  141. Character collations are supported by the base string types,
  142. specified by the string argument "collation"::
  143. from sqlalchemy import VARCHAR
  144. Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))
  145. When such a column is associated with a :class:`.Table`, the
  146. CREATE TABLE statement for this column will yield::
  147. login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
  148. .. versionadded:: 0.8 Character collations are now part of the base string
  149. types.
  150. LIMIT/OFFSET Support
  151. --------------------
  152. MSSQL has no support for the LIMIT or OFFSET keywords. LIMIT is
  153. supported directly through the ``TOP`` Transact SQL keyword::
  154. select.limit
  155. will yield::
  156. SELECT TOP n
  157. If using SQL Server 2005 or above, LIMIT with OFFSET
  158. support is available through the ``ROW_NUMBER OVER`` construct.
  159. For versions below 2005, LIMIT with OFFSET usage will fail.
  160. .. _mssql_isolation_level:
  161. Transaction Isolation Level
  162. ---------------------------
  163. All SQL Server dialects support setting of transaction isolation level
  164. both via a dialect-specific parameter
  165. :paramref:`.create_engine.isolation_level`
  166. accepted by :func:`.create_engine`,
  167. as well as the :paramref:`.Connection.execution_options.isolation_level`
  168. argument as passed to
  169. :meth:`.Connection.execution_options`. This feature works by issuing the
  170. command ``SET TRANSACTION ISOLATION LEVEL <level>`` for
  171. each new connection.
  172. To set isolation level using :func:`.create_engine`::
  173. engine = create_engine(
  174. "mssql+pyodbc://scott:tiger@ms_2008",
  175. isolation_level="REPEATABLE READ"
  176. )
  177. To set using per-connection execution options::
  178. connection = engine.connect()
  179. connection = connection.execution_options(
  180. isolation_level="READ COMMITTED"
  181. )
  182. Valid values for ``isolation_level`` include:
  183. * ``READ COMMITTED``
  184. * ``READ UNCOMMITTED``
  185. * ``REPEATABLE READ``
  186. * ``SERIALIZABLE``
  187. * ``SNAPSHOT`` - specific to SQL Server
  188. .. versionadded:: 1.1 support for isolation level setting on Microsoft
  189. SQL Server.
  190. Nullability
  191. -----------
  192. MSSQL has support for three levels of column nullability. The default
  193. nullability allows nulls and is explicit in the CREATE TABLE
  194. construct::
  195. name VARCHAR(20) NULL
  196. If ``nullable=None`` is specified then no specification is made. In
  197. other words the database's configured default is used. This will
  198. render::
  199. name VARCHAR(20)
  200. If ``nullable`` is ``True`` or ``False`` then the column will be
  201. ``NULL`` or ``NOT NULL`` respectively.
  202. Date / Time Handling
  203. --------------------
  204. DATE and TIME are supported. Bind parameters are converted
  205. to datetime.datetime() objects as required by most MSSQL drivers,
  206. and results are processed from strings if needed.
  207. The DATE and TIME types are not available for MSSQL 2005 and
  208. previous - if a server version below 2008 is detected, DDL
  209. for these types will be issued as DATETIME.
  210. .. _mssql_large_type_deprecation:
  211. Large Text/Binary Type Deprecation
  212. ----------------------------------
  213. Per `SQL Server 2012/2014 Documentation <http://technet.microsoft.com/en-us/library/ms187993.aspx>`_,
  214. the ``NTEXT``, ``TEXT`` and ``IMAGE`` datatypes are to be removed from SQL Server
  215. in a future release. SQLAlchemy normally relates these types to the
  216. :class:`.UnicodeText`, :class:`.Text` and :class:`.LargeBinary` datatypes.
  217. In order to accommodate this change, a new flag ``deprecate_large_types``
  218. is added to the dialect, which will be automatically set based on detection
  219. of the server version in use, if not otherwise set by the user. The
  220. behavior of this flag is as follows:
  221. * When this flag is ``True``, the :class:`.UnicodeText`, :class:`.Text` and
  222. :class:`.LargeBinary` datatypes, when used to render DDL, will render the
  223. types ``NVARCHAR(max)``, ``VARCHAR(max)``, and ``VARBINARY(max)``,
  224. respectively. This is a new behavior as of the addition of this flag.
  225. * When this flag is ``False``, the :class:`.UnicodeText`, :class:`.Text` and
  226. :class:`.LargeBinary` datatypes, when used to render DDL, will render the
  227. types ``NTEXT``, ``TEXT``, and ``IMAGE``,
  228. respectively. This is the long-standing behavior of these types.
  229. * The flag begins with the value ``None``, before a database connection is
  230. established. If the dialect is used to render DDL without the flag being
  231. set, it is interpreted the same as ``False``.
  232. * On first connection, the dialect detects if SQL Server version 2012 or greater
  233. is in use; if the flag is still at ``None``, it sets it to ``True`` or
  234. ``False`` based on whether 2012 or greater is detected.
  235. * The flag can be set to either ``True`` or ``False`` when the dialect
  236. is created, typically via :func:`.create_engine`::
  237. eng = create_engine("mssql+pymssql://user:pass@host/db",
  238. deprecate_large_types=True)
  239. * Complete control over whether the "old" or "new" types are rendered is
  240. available in all SQLAlchemy versions by using the UPPERCASE type objects
  241. instead: :class:`.NVARCHAR`, :class:`.VARCHAR`, :class:`.types.VARBINARY`,
  242. :class:`.TEXT`, :class:`.mssql.NTEXT`, :class:`.mssql.IMAGE` will always remain
  243. fixed and always output exactly that type.
  244. .. versionadded:: 1.0.0
  245. .. _legacy_schema_rendering:
  246. Legacy Schema Mode
  247. ------------------
  248. Very old versions of the MSSQL dialect introduced the behavior such that a
  249. schema-qualified table would be auto-aliased when used in a
  250. SELECT statement; given a table::
  251. account_table = Table(
  252. 'account', metadata,
  253. Column('id', Integer, primary_key=True),
  254. Column('info', String(100)),
  255. schema="customer_schema"
  256. )
  257. this legacy mode of rendering would assume that "customer_schema.account"
  258. would not be accepted by all parts of the SQL statement, as illustrated
  259. below::
  260. >>> eng = create_engine("mssql+pymssql://mydsn", legacy_schema_aliasing=True)
  261. >>> print(account_table.select().compile(eng))
  262. SELECT account_1.id, account_1.info
  263. FROM customer_schema.account AS account_1
  264. This mode of behavior is now off by default, as it appears to have served
  265. no purpose; however in the case that legacy applications rely upon it,
  266. it is available using the ``legacy_schema_aliasing`` argument to
  267. :func:`.create_engine` as illustrated above.
  268. .. versionchanged:: 1.1 the ``legacy_schema_aliasing`` flag introduced
  269. in version 1.0.5 to allow disabling of legacy mode for schemas now
  270. defaults to False.
  271. .. _mssql_indexes:
  272. Clustered Index Support
  273. -----------------------
  274. The MSSQL dialect supports clustered indexes (and primary keys) via the
  275. ``mssql_clustered`` option. This option is available to :class:`.Index`,
  276. :class:`.UniqueConstraint`, and :class:`.PrimaryKeyConstraint`.
  277. To generate a clustered index::
  278. Index("my_index", table.c.x, mssql_clustered=True)
  279. which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``.
  280. To generate a clustered primary key use::
  281. Table('my_table', metadata,
  282. Column('x', ...),
  283. Column('y', ...),
  284. PrimaryKeyConstraint("x", "y", mssql_clustered=True))
  285. which will render the table, for example, as::
  286. CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
  287. PRIMARY KEY CLUSTERED (x, y))
  288. Similarly, we can generate a clustered unique constraint using::
  289. Table('my_table', metadata,
  290. Column('x', ...),
  291. Column('y', ...),
  292. PrimaryKeyConstraint("x"),
  293. UniqueConstraint("y", mssql_clustered=True),
  294. )
  295. To explicitly request a non-clustered primary key (for example, when
  296. a separate clustered index is desired), use::
  297. Table('my_table', metadata,
  298. Column('x', ...),
  299. Column('y', ...),
  300. PrimaryKeyConstraint("x", "y", mssql_clustered=False))
  301. which will render the table, for example, as::
  302. CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
  303. PRIMARY KEY NONCLUSTERED (x, y))
  304. .. versionchanged:: 1.1 the ``mssql_clustered`` option now defaults
  305. to None, rather than False. ``mssql_clustered=False`` now explicitly
  306. renders the NONCLUSTERED clause, whereas None omits the CLUSTERED
  307. clause entirely, allowing SQL Server defaults to take effect.
  308. MSSQL-Specific Index Options
  309. -----------------------------
  310. In addition to clustering, the MSSQL dialect supports other special options
  311. for :class:`.Index`.
  312. INCLUDE
  313. ^^^^^^^
  314. The ``mssql_include`` option renders INCLUDE(colname) for the given string
  315. names::
  316. Index("my_index", table.c.x, mssql_include=['y'])
  317. would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``
  318. .. versionadded:: 0.8
  319. Index ordering
  320. ^^^^^^^^^^^^^^
  321. Index ordering is available via functional expressions, such as::
  322. Index("my_index", table.c.x.desc())
  323. would render the index as ``CREATE INDEX my_index ON table (x DESC)``
  324. .. versionadded:: 0.8
  325. .. seealso::
  326. :ref:`schema_indexes_functional`
  327. Compatibility Levels
  328. --------------------
  329. MSSQL supports the notion of setting compatibility levels at the
  330. database level. This allows, for instance, to run a database that
  331. is compatible with SQL2000 while running on a SQL2005 database
  332. server. ``server_version_info`` will always return the database
  333. server version information (in this case SQL2005) and not the
  334. compatibility level information. Because of this, if running under
  335. a backwards compatibility mode SQLAlchemy may attempt to use T-SQL
  336. statements that are unable to be parsed by the database server.
  337. Triggers
  338. --------
  339. SQLAlchemy by default uses OUTPUT INSERTED to get at newly
  340. generated primary key values via IDENTITY columns or other
  341. server side defaults. MS-SQL does not
  342. allow the usage of OUTPUT INSERTED on tables that have triggers.
  343. To disable the usage of OUTPUT INSERTED on a per-table basis,
  344. specify ``implicit_returning=False`` for each :class:`.Table`
  345. which has triggers::
  346. Table('mytable', metadata,
  347. Column('id', Integer, primary_key=True),
  348. # ...,
  349. implicit_returning=False
  350. )
  351. Declarative form::
  352. class MyClass(Base):
  353. # ...
  354. __table_args__ = {'implicit_returning':False}
  355. This option can also be specified engine-wide using the
  356. ``implicit_returning=False`` argument on :func:`.create_engine`.
  357. .. _mssql_rowcount_versioning:
  358. Rowcount Support / ORM Versioning
  359. ---------------------------------
  360. The SQL Server drivers have very limited ability to return the number
  361. of rows updated from an UPDATE or DELETE statement. In particular, the
  362. pymssql driver has no support, whereas the pyodbc driver can only return
  363. this value under certain conditions.
  364. In particular, updated rowcount is not available when OUTPUT INSERTED
  365. is used. This impacts the SQLAlchemy ORM's versioning feature when
  366. server-side versioning schemes are used. When
  367. using pyodbc, the "implicit_returning" flag needs to be set to false
  368. for any ORM mapped class that uses a version_id column in conjunction with
  369. a server-side version generator::
  370. class MyTable(Base):
  371. __tablename__ = 'mytable'
  372. id = Column(Integer, primary_key=True)
  373. stuff = Column(String(10))
  374. timestamp = Column(TIMESTAMP(), default=text('DEFAULT'))
  375. __mapper_args__ = {
  376. 'version_id_col': timestamp,
  377. 'version_id_generator': False,
  378. }
  379. __table_args__ = {
  380. 'implicit_returning': False
  381. }
  382. Without the implicit_returning flag above, the UPDATE statement will
  383. use ``OUTPUT inserted.timestamp`` and the rowcount will be returned as
  384. -1, causing the versioning logic to fail.
  385. Enabling Snapshot Isolation
  386. ---------------------------
  387. Not necessarily specific to SQLAlchemy, SQL Server has a default transaction
  388. isolation mode that locks entire tables, and causes even mildly concurrent
  389. applications to have long held locks and frequent deadlocks.
  390. Enabling snapshot isolation for the database as a whole is recommended
  391. for modern levels of concurrency support. This is accomplished via the
  392. following ALTER DATABASE commands executed at the SQL prompt::
  393. ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
  394. ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON
  395. Background on SQL Server snapshot isolation is available at
  396. http://msdn.microsoft.com/en-us/library/ms175095.aspx.
  397. Known Issues
  398. ------------
  399. * No support for more than one ``IDENTITY`` column per table
  400. * reflection of indexes does not work with versions older than
  401. SQL Server 2005
  402. """
  403. import datetime
  404. import operator
  405. import re
  406. from ... import sql, schema as sa_schema, exc, util
  407. from ...sql import compiler, expression, util as sql_util
  408. from ... import engine
  409. from ...engine import reflection, default
  410. from ... import types as sqltypes
  411. from ...types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \
  412. FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\
  413. TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR
  414. from ...util import update_wrapper
  415. from . import information_schema as ischema
# http://sqlserverbuilds.blogspot.com/
# Major-version tuples corresponding to SQL Server releases, for
# comparison against the server_version_info reported on connect.
MS_2016_VERSION = (13,)
MS_2014_VERSION = (12,)
MS_2012_VERSION = (11,)
MS_2008_VERSION = (10,)
MS_2005_VERSION = (9,)
MS_2000_VERSION = (8,)
# T-SQL reserved words for the MSSQL dialect.
RESERVED_WORDS = set(
    ['add', 'all', 'alter', 'and', 'any', 'as', 'asc', 'authorization',
     'backup', 'begin', 'between', 'break', 'browse', 'bulk', 'by', 'cascade',
     'case', 'check', 'checkpoint', 'close', 'clustered', 'coalesce',
     'collate', 'column', 'commit', 'compute', 'constraint', 'contains',
     'containstable', 'continue', 'convert', 'create', 'cross', 'current',
     'current_date', 'current_time', 'current_timestamp', 'current_user',
     'cursor', 'database', 'dbcc', 'deallocate', 'declare', 'default',
     'delete', 'deny', 'desc', 'disk', 'distinct', 'distributed', 'double',
     'drop', 'dump', 'else', 'end', 'errlvl', 'escape', 'except', 'exec',
     'execute', 'exists', 'exit', 'external', 'fetch', 'file', 'fillfactor',
     'for', 'foreign', 'freetext', 'freetexttable', 'from', 'full',
     'function', 'goto', 'grant', 'group', 'having', 'holdlock', 'identity',
     'identity_insert', 'identitycol', 'if', 'in', 'index', 'inner', 'insert',
     'intersect', 'into', 'is', 'join', 'key', 'kill', 'left', 'like',
     'lineno', 'load', 'merge', 'national', 'nocheck', 'nonclustered', 'not',
     'null', 'nullif', 'of', 'off', 'offsets', 'on', 'open', 'opendatasource',
     'openquery', 'openrowset', 'openxml', 'option', 'or', 'order', 'outer',
     'over', 'percent', 'pivot', 'plan', 'precision', 'primary', 'print',
     'proc', 'procedure', 'public', 'raiserror', 'read', 'readtext',
     'reconfigure', 'references', 'replication', 'restore', 'restrict',
     'return', 'revert', 'revoke', 'right', 'rollback', 'rowcount',
     'rowguidcol', 'rule', 'save', 'schema', 'securityaudit', 'select',
     'session_user', 'set', 'setuser', 'shutdown', 'some', 'statistics',
     'system_user', 'table', 'tablesample', 'textsize', 'then', 'to', 'top',
     'tran', 'transaction', 'trigger', 'truncate', 'tsequal', 'union',
     'unique', 'unpivot', 'update', 'updatetext', 'use', 'user', 'values',
     'varying', 'view', 'waitfor', 'when', 'where', 'while', 'with',
     'writetext',
     ])
  453. class REAL(sqltypes.REAL):
  454. __visit_name__ = 'REAL'
  455. def __init__(self, **kw):
  456. # REAL is a synonym for FLOAT(24) on SQL server
  457. kw['precision'] = 24
  458. super(REAL, self).__init__(**kw)
class TINYINT(sqltypes.Integer):
    """The SQL Server ``TINYINT`` integer type."""
    __visit_name__ = 'TINYINT'
  461. # MSSQL DATE/TIME types have varied behavior, sometimes returning
  462. # strings. MSDate/TIME check for everything, and always
  463. # filter bind parameters into datetime objects (required by pyodbc,
  464. # not sure about other dialects).
  465. class _MSDate(sqltypes.Date):
  466. def bind_processor(self, dialect):
  467. def process(value):
  468. if type(value) == datetime.date:
  469. return datetime.datetime(value.year, value.month, value.day)
  470. else:
  471. return value
  472. return process
  473. _reg = re.compile(r"(\d+)-(\d+)-(\d+)")
  474. def result_processor(self, dialect, coltype):
  475. def process(value):
  476. if isinstance(value, datetime.datetime):
  477. return value.date()
  478. elif isinstance(value, util.string_types):
  479. m = self._reg.match(value)
  480. if not m:
  481. raise ValueError(
  482. "could not parse %r as a date value" % (value, ))
  483. return datetime.date(*[
  484. int(x or 0)
  485. for x in m.groups()
  486. ])
  487. else:
  488. return value
  489. return process
  490. class TIME(sqltypes.TIME):
  491. def __init__(self, precision=None, **kwargs):
  492. self.precision = precision
  493. super(TIME, self).__init__()
  494. __zero_date = datetime.date(1900, 1, 1)
  495. def bind_processor(self, dialect):
  496. def process(value):
  497. if isinstance(value, datetime.datetime):
  498. value = datetime.datetime.combine(
  499. self.__zero_date, value.time())
  500. elif isinstance(value, datetime.time):
  501. value = datetime.datetime.combine(self.__zero_date, value)
  502. return value
  503. return process
  504. _reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d{0,6}))?")
  505. def result_processor(self, dialect, coltype):
  506. def process(value):
  507. if isinstance(value, datetime.datetime):
  508. return value.time()
  509. elif isinstance(value, util.string_types):
  510. m = self._reg.match(value)
  511. if not m:
  512. raise ValueError(
  513. "could not parse %r as a time value" % (value, ))
  514. return datetime.time(*[
  515. int(x or 0)
  516. for x in m.groups()])
  517. else:
  518. return value
  519. return process
  520. _MSTime = TIME
  521. class _DateTimeBase(object):
  522. def bind_processor(self, dialect):
  523. def process(value):
  524. if type(value) == datetime.date:
  525. return datetime.datetime(value.year, value.month, value.day)
  526. else:
  527. return value
  528. return process
class _MSDateTime(_DateTimeBase, sqltypes.DateTime):
    # DateTime implementation carrying the date->datetime bind coercion.
    pass
class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime):
    """MSSQL SMALLDATETIME type."""

    __visit_name__ = 'SMALLDATETIME'
class DATETIME2(_DateTimeBase, sqltypes.DateTime):
    """MSSQL DATETIME2 type, with optional fractional-second precision."""

    __visit_name__ = 'DATETIME2'

    def __init__(self, precision=None, **kw):
        super(DATETIME2, self).__init__(**kw)
        self.precision = precision
  538. # TODO: is this not an Interval ?
class DATETIMEOFFSET(sqltypes.TypeEngine):
    """MSSQL DATETIMEOFFSET type, with optional precision."""

    __visit_name__ = 'DATETIMEOFFSET'

    def __init__(self, precision=None, **kwargs):
        self.precision = precision
class _StringType(object):
    """Base for MSSQL string types.

    :param collation: optional collation name applied via a COLLATE
     annotation when the type is rendered.
    """

    def __init__(self, collation=None):
        super(_StringType, self).__init__(collation=collation)
class NTEXT(sqltypes.UnicodeText):
    """MSSQL NTEXT type, for variable-length unicode text up to 2^30
    characters."""

    __visit_name__ = 'NTEXT'
class VARBINARY(sqltypes.VARBINARY, sqltypes.LargeBinary):
    """The MSSQL VARBINARY type.

    This type extends both :class:`.types.VARBINARY` and
    :class:`.types.LargeBinary`.  In "deprecate_large_types" mode,
    the :class:`.types.LargeBinary` type will produce ``VARBINARY(max)``
    on SQL Server.

    .. versionadded:: 1.0.0

    .. seealso::

        :ref:`mssql_large_type_deprecation`

    """

    __visit_name__ = 'VARBINARY'
class IMAGE(sqltypes.LargeBinary):
    """MSSQL IMAGE type; used for LargeBinary unless
    "deprecate_large_types" is in effect."""

    __visit_name__ = 'IMAGE'
class BIT(sqltypes.TypeEngine):
    """MSSQL BIT type."""

    __visit_name__ = 'BIT'
class MONEY(sqltypes.TypeEngine):
    """MSSQL MONEY type."""

    __visit_name__ = 'MONEY'
class SMALLMONEY(sqltypes.TypeEngine):
    """MSSQL SMALLMONEY type."""

    __visit_name__ = 'SMALLMONEY'
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
    """MSSQL UNIQUEIDENTIFIER type."""

    __visit_name__ = "UNIQUEIDENTIFIER"
class SQL_VARIANT(sqltypes.TypeEngine):
    """MSSQL SQL_VARIANT type."""

    __visit_name__ = 'SQL_VARIANT'
# old names.  Backwards-compatible aliases retained for code written
# against earlier public names of these types.
MSDateTime = _MSDateTime
MSDate = _MSDate
MSReal = REAL
MSTinyInteger = TINYINT
MSTime = TIME
MSSmallDateTime = SMALLDATETIME
MSDateTime2 = DATETIME2
MSDateTimeOffset = DATETIMEOFFSET
MSText = TEXT
MSNText = NTEXT
MSString = VARCHAR
MSNVarchar = NVARCHAR
MSChar = CHAR
MSNChar = NCHAR
MSBinary = BINARY
MSVarBinary = VARBINARY
MSImage = IMAGE
MSBit = BIT
MSMoney = MONEY
MSSmallMoney = SMALLMONEY
MSUniqueIdentifier = UNIQUEIDENTIFIER
MSVariant = SQL_VARIANT
# Mapping of INFORMATION_SCHEMA type names to the dialect's type
# classes, consulted during table reflection.
ischema_names = {
    'int': INTEGER,
    'bigint': BIGINT,
    'smallint': SMALLINT,
    'tinyint': TINYINT,
    'varchar': VARCHAR,
    'nvarchar': NVARCHAR,
    'char': CHAR,
    'nchar': NCHAR,
    'text': TEXT,
    'ntext': NTEXT,
    'decimal': DECIMAL,
    'numeric': NUMERIC,
    'float': FLOAT,
    'datetime': DATETIME,
    'datetime2': DATETIME2,
    'datetimeoffset': DATETIMEOFFSET,
    'date': DATE,
    'time': TIME,
    'smalldatetime': SMALLDATETIME,
    'binary': BINARY,
    'varbinary': VARBINARY,
    'bit': BIT,
    'real': REAL,
    'image': IMAGE,
    'timestamp': TIMESTAMP,
    'money': MONEY,
    'smallmoney': SMALLMONEY,
    'uniqueidentifier': UNIQUEIDENTIFIER,
    'sql_variant': SQL_VARIANT,
}
  628. class MSTypeCompiler(compiler.GenericTypeCompiler):
  629. def _extend(self, spec, type_, length=None):
  630. """Extend a string-type declaration with standard SQL
  631. COLLATE annotations.
  632. """
  633. if getattr(type_, 'collation', None):
  634. collation = 'COLLATE %s' % type_.collation
  635. else:
  636. collation = None
  637. if not length:
  638. length = type_.length
  639. if length:
  640. spec = spec + "(%s)" % length
  641. return ' '.join([c for c in (spec, collation)
  642. if c is not None])
  643. def visit_FLOAT(self, type_, **kw):
  644. precision = getattr(type_, 'precision', None)
  645. if precision is None:
  646. return "FLOAT"
  647. else:
  648. return "FLOAT(%(precision)s)" % {'precision': precision}
  649. def visit_TINYINT(self, type_, **kw):
  650. return "TINYINT"
  651. def visit_DATETIMEOFFSET(self, type_, **kw):
  652. if type_.precision is not None:
  653. return "DATETIMEOFFSET(%s)" % type_.precision
  654. else:
  655. return "DATETIMEOFFSET"
  656. def visit_TIME(self, type_, **kw):
  657. precision = getattr(type_, 'precision', None)
  658. if precision is not None:
  659. return "TIME(%s)" % precision
  660. else:
  661. return "TIME"
  662. def visit_DATETIME2(self, type_, **kw):
  663. precision = getattr(type_, 'precision', None)
  664. if precision is not None:
  665. return "DATETIME2(%s)" % precision
  666. else:
  667. return "DATETIME2"
  668. def visit_SMALLDATETIME(self, type_, **kw):
  669. return "SMALLDATETIME"
  670. def visit_unicode(self, type_, **kw):
  671. return self.visit_NVARCHAR(type_, **kw)
  672. def visit_text(self, type_, **kw):
  673. if self.dialect.deprecate_large_types:
  674. return self.visit_VARCHAR(type_, **kw)
  675. else:
  676. return self.visit_TEXT(type_, **kw)
  677. def visit_unicode_text(self, type_, **kw):
  678. if self.dialect.deprecate_large_types:
  679. return self.visit_NVARCHAR(type_, **kw)
  680. else:
  681. return self.visit_NTEXT(type_, **kw)
  682. def visit_NTEXT(self, type_, **kw):
  683. return self._extend("NTEXT", type_)
  684. def visit_TEXT(self, type_, **kw):
  685. return self._extend("TEXT", type_)
  686. def visit_VARCHAR(self, type_, **kw):
  687. return self._extend("VARCHAR", type_, length=type_.length or 'max')
  688. def visit_CHAR(self, type_, **kw):
  689. return self._extend("CHAR", type_)
  690. def visit_NCHAR(self, type_, **kw):
  691. return self._extend("NCHAR", type_)
  692. def visit_NVARCHAR(self, type_, **kw):
  693. return self._extend("NVARCHAR", type_, length=type_.length or 'max')
  694. def visit_date(self, type_, **kw):
  695. if self.dialect.server_version_info < MS_2008_VERSION:
  696. return self.visit_DATETIME(type_, **kw)
  697. else:
  698. return self.visit_DATE(type_, **kw)
  699. def visit_time(self, type_, **kw):
  700. if self.dialect.server_version_info < MS_2008_VERSION:
  701. return self.visit_DATETIME(type_, **kw)
  702. else:
  703. return self.visit_TIME(type_, **kw)
  704. def visit_large_binary(self, type_, **kw):
  705. if self.dialect.deprecate_large_types:
  706. return self.visit_VARBINARY(type_, **kw)
  707. else:
  708. return self.visit_IMAGE(type_, **kw)
  709. def visit_IMAGE(self, type_, **kw):
  710. return "IMAGE"
  711. def visit_VARBINARY(self, type_, **kw):
  712. return self._extend(
  713. "VARBINARY",
  714. type_,
  715. length=type_.length or 'max')
  716. def visit_boolean(self, type_, **kw):
  717. return self.visit_BIT(type_)
  718. def visit_BIT(self, type_, **kw):
  719. return "BIT"
  720. def visit_MONEY(self, type_, **kw):
  721. return "MONEY"
  722. def visit_SMALLMONEY(self, type_, **kw):
  723. return 'SMALLMONEY'
  724. def visit_UNIQUEIDENTIFIER(self, type_, **kw):
  725. return "UNIQUEIDENTIFIER"
  726. def visit_SQL_VARIANT(self, type_, **kw):
  727. return 'SQL_VARIANT'
class MSExecutionContext(default.DefaultExecutionContext):
    # state flags reset per-execution
    _enable_identity_insert = False
    _select_lastrowid = False
    _result_proxy = None
    _lastrowid = None

    def _opt_encode(self, statement):
        # Encode the statement string when the DBAPI cannot accept
        # unicode statements directly.
        if not self.dialect.supports_unicode_statements:
            return self.dialect._encoder(statement)[0]
        else:
            return statement

    def pre_exec(self):
        """Activate IDENTITY_INSERT if needed."""

        if self.isinsert:
            tbl = self.compiled.statement.table
            seq_column = tbl._autoincrement_column
            insert_has_sequence = seq_column is not None

            if insert_has_sequence:
                # IDENTITY_INSERT must be switched on when an explicit
                # value for the IDENTITY column is supplied, either in the
                # compiled parameters or in the statement's own parameters
                # (single or multi-values form).
                self._enable_identity_insert = \
                    seq_column.key in self.compiled_parameters[0] or \
                    (
                        self.compiled.statement.parameters and (
                            (
                                self.compiled.statement._has_multi_parameters
                                and
                                seq_column.key in
                                self.compiled.statement.parameters[0]
                            ) or (
                                not
                                self.compiled.statement._has_multi_parameters
                                and
                                seq_column.key in
                                self.compiled.statement.parameters
                            )
                        )
                    )
            else:
                self._enable_identity_insert = False

            # fetch the last row id post-execute only when nothing else
            # (RETURNING, explicit identity, executemany) supplies it
            self._select_lastrowid = not self.compiled.inline and \
                insert_has_sequence and \
                not self.compiled.returning and \
                not self._enable_identity_insert and \
                not self.executemany

            if self._enable_identity_insert:
                self.root_connection._cursor_execute(
                    self.cursor,
                    self._opt_encode(
                        "SET IDENTITY_INSERT %s ON" %
                        self.dialect.identifier_preparer.format_table(tbl)),
                    (),
                    self)

    def post_exec(self):
        """Disable IDENTITY_INSERT if enabled."""

        conn = self.root_connection
        if self._select_lastrowid:
            if self.dialect.use_scope_identity:
                conn._cursor_execute(
                    self.cursor,
                    "SELECT scope_identity() AS lastrowid", (), self)
            else:
                conn._cursor_execute(self.cursor,
                                     "SELECT @@identity AS lastrowid",
                                     (),
                                     self)
            # fetchall() ensures the cursor is consumed without closing it
            row = self.cursor.fetchall()[0]
            self._lastrowid = int(row[0])

        if (self.isinsert or self.isupdate or self.isdelete) and \
                self.compiled.returning:
            # buffer OUTPUT rows fully before the cursor is touched again
            self._result_proxy = engine.FullyBufferedResultProxy(self)

        if self._enable_identity_insert:
            conn._cursor_execute(
                self.cursor,
                self._opt_encode(
                    "SET IDENTITY_INSERT %s OFF" %
                    self.dialect.identifier_preparer.format_table(
                        self.compiled.statement.table)),
                (),
                self)

    def get_lastrowid(self):
        return self._lastrowid

    def handle_dbapi_exception(self, e):
        # best effort: restore IDENTITY_INSERT even when the statement
        # itself failed; errors here are deliberately swallowed.
        if self._enable_identity_insert:
            try:
                self.cursor.execute(
                    self._opt_encode(
                        "SET IDENTITY_INSERT %s OFF" %
                        self.dialect.identifier_preparer.format_table(
                            self.compiled.statement.table)))
            except Exception:
                pass

    def get_result_proxy(self):
        if self._result_proxy:
            return self._result_proxy
        else:
            return engine.ResultProxy(self)
class MSSQLCompiler(compiler.SQLCompiler):
    returning_precedes_values = True

    # EXTRACT field names translated to their DATEPART() equivalents
    extract_map = util.update_copy(
        compiler.SQLCompiler.extract_map,
        {
            'doy': 'dayofyear',
            'dow': 'weekday',
            'milliseconds': 'millisecond',
            'microseconds': 'microsecond'
        })

    def __init__(self, *args, **kwargs):
        # Table -> Alias cache populated by _schema_aliased_table()
        self.tablealiases = {}
        super(MSSQLCompiler, self).__init__(*args, **kwargs)

    def _with_legacy_schema_aliasing(fn):
        # Decorator: run ``fn`` only when the dialect's
        # legacy_schema_aliasing flag is enabled; otherwise defer to the
        # base compiler's same-named method.
        def decorate(self, *arg, **kw):
            if self.dialect.legacy_schema_aliasing:
                return fn(self, *arg, **kw)
            else:
                super_ = getattr(super(MSSQLCompiler, self), fn.__name__)
                return super_(*arg, **kw)
        return decorate

    def visit_now_func(self, fn, **kw):
        return "CURRENT_TIMESTAMP"

    def visit_current_date_func(self, fn, **kw):
        return "GETDATE()"

    def visit_length_func(self, fn, **kw):
        # SQL Server spells string length as LEN()
        return "LEN%s" % self.function_argspec(fn, **kw)

    def visit_char_length_func(self, fn, **kw):
        return "LEN%s" % self.function_argspec(fn, **kw)

    def visit_concat_op_binary(self, binary, operator, **kw):
        # string concatenation uses the "+" operator
        return "%s + %s" % \
            (self.process(binary.left, **kw),
             self.process(binary.right, **kw))

    def visit_true(self, expr, **kw):
        # no boolean literals; BIT-style 1/0 is used instead
        return '1'

    def visit_false(self, expr, **kw):
        return '0'

    def visit_match_op_binary(self, binary, operator, **kw):
        return "CONTAINS (%s, %s)" % (
            self.process(binary.left, **kw),
            self.process(binary.right, **kw))

    def get_select_precolumns(self, select, **kw):
        """ MS-SQL puts TOP, it's version of LIMIT here """

        s = ""
        if select._distinct:
            s += "DISTINCT "

        if select._simple_int_limit and not select._offset:
            # ODBC drivers and possibly others
            # don't support bind params in the SELECT clause on SQL Server.
            # so have to use literal here.
            s += "TOP %d " % select._limit

        if s:
            return s
        else:
            return compiler.SQLCompiler.get_select_precolumns(
                self, select, **kw)

    def get_from_hint_text(self, table, text):
        return text

    def get_crud_hint_text(self, table, text):
        return text

    def limit_clause(self, select, **kw):
        # Limit in mssql is after the select keyword
        return ""

    def visit_select(self, select, **kwargs):
        """Look for ``LIMIT`` and OFFSET in a select statement, and if
        so tries to wrap it in a subquery with ``row_number()`` criterion.

        """
        if (
            (
                not select._simple_int_limit and
                select._limit_clause is not None
            ) or (
                select._offset_clause is not None and
                not select._simple_int_offset or select._offset
            )
        ) and not getattr(select, '_mssql_visit', None):

            # to use ROW_NUMBER(), an ORDER BY is required.
            if not select._order_by_clause.clauses:
                raise exc.CompileError('MSSQL requires an order_by when '
                                       'using an OFFSET or a non-simple '
                                       'LIMIT clause')

            _order_by_clauses = [
                sql_util.unwrap_label_reference(elem)
                for elem in select._order_by_clause.clauses
            ]

            limit_clause = select._limit_clause
            offset_clause = select._offset_clause
            kwargs['select_wraps_for'] = select
            select = select._generate()
            # mark the generated select so this wrapping happens only once
            select._mssql_visit = True
            select = select.column(
                sql.func.ROW_NUMBER().over(order_by=_order_by_clauses)
                .label("mssql_rn")).order_by(None).alias()

            mssql_rn = sql.column('mssql_rn')
            # outer select excludes the synthetic row-number column
            limitselect = sql.select([c for c in select.c if
                                      c.key != 'mssql_rn'])
            if offset_clause is not None:
                limitselect.append_whereclause(mssql_rn > offset_clause)
                if limit_clause is not None:
                    limitselect.append_whereclause(
                        mssql_rn <= (limit_clause + offset_clause))
            else:
                limitselect.append_whereclause(
                    mssql_rn <= (limit_clause))
            return self.process(limitselect, **kwargs)
        else:
            return compiler.SQLCompiler.visit_select(self, select, **kwargs)

    @_with_legacy_schema_aliasing
    def visit_table(self, table, mssql_aliased=False, iscrud=False, **kwargs):
        if mssql_aliased is table or iscrud:
            return super(MSSQLCompiler, self).visit_table(table, **kwargs)

        # alias schema-qualified tables
        alias = self._schema_aliased_table(table)
        if alias is not None:
            return self.process(alias, mssql_aliased=table, **kwargs)
        else:
            return super(MSSQLCompiler, self).visit_table(table, **kwargs)

    @_with_legacy_schema_aliasing
    def visit_alias(self, alias, **kw):
        # translate for schema-qualified table aliases
        kw['mssql_aliased'] = alias.original
        return super(MSSQLCompiler, self).visit_alias(alias, **kw)

    @_with_legacy_schema_aliasing
    def visit_column(self, column, add_to_result_map=None, **kw):
        if column.table is not None and \
                (not self.isupdate and not self.isdelete) or \
                self.is_subquery():
            # translate for schema-qualified table aliases
            t = self._schema_aliased_table(column.table)
            if t is not None:
                converted = expression._corresponding_column_or_error(
                    t, column)
                if add_to_result_map is not None:
                    add_to_result_map(
                        column.name,
                        column.name,
                        (column, column.name, column.key),
                        column.type
                    )

                return super(MSSQLCompiler, self).\
                    visit_column(converted, **kw)

        return super(MSSQLCompiler, self).visit_column(
            column, add_to_result_map=add_to_result_map, **kw)

    def _schema_aliased_table(self, table):
        # Return (and cache) an anonymous alias for a schema-qualified
        # table; None when the table has no schema.
        if getattr(table, 'schema', None) is not None:
            if table not in self.tablealiases:
                self.tablealiases[table] = table.alias()
            return self.tablealiases[table]
        else:
            return None

    def visit_extract(self, extract, **kw):
        field = self.extract_map.get(extract.field, extract.field)
        return 'DATEPART(%s, %s)' % \
            (field, self.process(extract.expr, **kw))

    def visit_savepoint(self, savepoint_stmt):
        return "SAVE TRANSACTION %s" % \
            self.preparer.format_savepoint(savepoint_stmt)

    def visit_rollback_to_savepoint(self, savepoint_stmt):
        return ("ROLLBACK TRANSACTION %s"
                % self.preparer.format_savepoint(savepoint_stmt))

    def visit_binary(self, binary, **kwargs):
        """Move bind parameters to the right-hand side of an operator, where
        possible.

        """
        if (
            isinstance(binary.left, expression.BindParameter)
            and binary.operator == operator.eq
            and not isinstance(binary.right, expression.BindParameter)
        ):
            return self.process(
                expression.BinaryExpression(binary.right,
                                            binary.left,
                                            binary.operator),
                **kwargs)
        return super(MSSQLCompiler, self).visit_binary(binary, **kwargs)

    def returning_clause(self, stmt, returning_cols):
        # INSERT/UPDATE expose the "inserted" pseudo-table in OUTPUT;
        # DELETE exposes "deleted".
        if self.isinsert or self.isupdate:
            target = stmt.table.alias("inserted")
        else:
            target = stmt.table.alias("deleted")

        adapter = sql_util.ClauseAdapter(target)

        columns = [
            self._label_select_column(None, adapter.traverse(c),
                                      True, False, {})
            for c in expression._select_iterables(returning_cols)
        ]

        return 'OUTPUT ' + ', '.join(columns)

    def get_cte_preamble(self, recursive):
        # SQL Server finds it too inconvenient to accept
        # an entirely optional, SQL standard specified,
        # "RECURSIVE" word with their "WITH",
        # so here we go
        return "WITH"

    def label_select_column(self, select, column, asfrom):
        if isinstance(column, expression.Function):
            return column.label(None)
        else:
            return super(MSSQLCompiler, self).\
                label_select_column(select, column, asfrom)

    def for_update_clause(self, select):
        # "FOR UPDATE" is only allowed on "DECLARE CURSOR" which
        # SQLAlchemy doesn't use
        return ''

    def order_by_clause(self, select, **kw):
        order_by = self.process(select._order_by_clause, **kw)

        # MSSQL only allows ORDER BY in subqueries if there is a LIMIT
        if order_by and (not self.is_subquery() or select._limit):
            return " ORDER BY " + order_by
        else:
            return ""

    def update_from_clause(self, update_stmt,
                           from_table, extra_froms,
                           from_hints,
                           **kw):
        """Render the UPDATE..FROM clause specific to MSSQL.

        In MSSQL, if the UPDATE statement involves an alias of the table to
        be updated, then the table itself must be added to the FROM list as
        well. Otherwise, it is optional. Here, we add it regardless.

        """
        return "FROM " + ', '.join(
            t._compiler_dispatch(self, asfrom=True,
                                 fromhints=from_hints, **kw)
            for t in [from_table] + extra_froms)
  1046. class MSSQLStrictCompiler(MSSQLCompiler):
  1047. """A subclass of MSSQLCompiler which disables the usage of bind
  1048. parameters where not allowed natively by MS-SQL.
  1049. A dialect may use this compiler on a platform where native
  1050. binds are used.
  1051. """
  1052. ansi_bind_rules = True
  1053. def visit_in_op_binary(self, binary, operator, **kw):
  1054. kw['literal_binds'] = True
  1055. return "%s IN %s" % (
  1056. self.process(binary.left, **kw),
  1057. self.process(binary.right, **kw)
  1058. )
  1059. def visit_notin_op_binary(self, binary, operator, **kw):
  1060. kw['literal_binds'] = True
  1061. return "%s NOT IN %s" % (
  1062. self.process(binary.left, **kw),
  1063. self.process(binary.right, **kw)
  1064. )
  1065. def render_literal_value(self, value, type_):
  1066. """
  1067. For date and datetime values, convert to a string
  1068. format acceptable to MSSQL. That seems to be the
  1069. so-called ODBC canonical date format which looks
  1070. like this:
  1071. yyyy-mm-dd hh:mi:ss.mmm(24h)
  1072. For other data types, call the base class implementation.
  1073. """
  1074. # datetime and date are both subclasses of datetime.date
  1075. if issubclass(type(value), datetime.date):
  1076. # SQL Server wants single quotes around the date string.
  1077. return "'" + str(value) + "'"
  1078. else:
  1079. return super(MSSQLStrictCompiler, self).\
  1080. render_literal_value(value, type_)
class MSDDLCompiler(compiler.DDLCompiler):
    def get_column_specification(self, column, **kwargs):
        """Build a column clause for CREATE TABLE: name, type,
        nullability, and IDENTITY or DEFAULT as appropriate."""
        colspec = (
            self.preparer.format_column(column) + " "
            + self.dialect.type_compiler.process(
                column.type, type_expression=column)
        )

        if column.nullable is not None:
            if not column.nullable or column.primary_key or \
                    isinstance(column.default, sa_schema.Sequence):
                colspec += " NOT NULL"
            else:
                colspec += " NULL"

        if column.table is None:
            raise exc.CompileError(
                "mssql requires Table-bound columns "
                "in order to generate DDL")

        # install an IDENTITY Sequence if we have either a sequence or an
        # implicit IDENTITY column
        if isinstance(column.default, sa_schema.Sequence):
            if column.default.start == 0:
                # an explicit zero start would be discarded by "or 1" below
                start = 0
            else:
                start = column.default.start or 1

            colspec += " IDENTITY(%s,%s)" % (start,
                                             column.default.increment or 1)
        elif column is column.table._autoincrement_column:
            colspec += " IDENTITY(1,1)"
        else:
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default

        return colspec

    def visit_create_index(self, create, include_schema=False):
        index = create.element
        self._verify_index_table(index)
        preparer = self.preparer
        text = "CREATE "
        if index.unique:
            text += "UNIQUE "

        # handle clustering option
        clustered = index.dialect_options['mssql']['clustered']
        if clustered is not None:
            if clustered:
                text += "CLUSTERED "
            else:
                text += "NONCLUSTERED "

        text += "INDEX %s ON %s (%s)" \
            % (
                self._prepared_index_name(index,
                                          include_schema=include_schema),
                preparer.format_table(index.table),
                ', '.join(
                    self.sql_compiler.process(expr,
                                              include_table=False,
                                              literal_binds=True) for
                    expr in index.expressions)
            )

        # handle other included columns (mssql_include); names may be
        # given as strings or Column objects
        if index.dialect_options['mssql']['include']:
            inclusions = [index.table.c[col]
                          if isinstance(col, util.string_types) else col
                          for col in
                          index.dialect_options['mssql']['include']
                          ]

            text += " INCLUDE (%s)" \
                % ', '.join([preparer.quote(c.name)
                             for c in inclusions])

        return text

    def visit_drop_index(self, drop):
        return "\nDROP INDEX %s ON %s" % (
            self._prepared_index_name(drop.element, include_schema=False),
            self.preparer.format_table(drop.element.table)
        )

    def visit_primary_key_constraint(self, constraint):
        if len(constraint) == 0:
            return ''
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                self.preparer.format_constraint(constraint)
        text += "PRIMARY KEY "

        # mssql_clustered constraint option
        clustered = constraint.dialect_options['mssql']['clustered']
        if clustered is not None:
            if clustered:
                text += "CLUSTERED "
            else:
                text += "NONCLUSTERED "

        text += "(%s)" % ', '.join(self.preparer.quote(c.name)
                                   for c in constraint)
        text += self.define_constraint_deferrability(constraint)
        return text

    def visit_unique_constraint(self, constraint):
        if len(constraint) == 0:
            return ''
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                self.preparer.format_constraint(constraint)
        text += "UNIQUE "

        # mssql_clustered constraint option
        clustered = constraint.dialect_options['mssql']['clustered']
        if clustered is not None:
            if clustered:
                text += "CLUSTERED "
            else:
                text += "NONCLUSTERED "

        text += "(%s)" % ', '.join(self.preparer.quote(c.name)
                                   for c in constraint)
        text += self.define_constraint_deferrability(constraint)
        return text
  1191. class MSIdentifierPreparer(compiler.IdentifierPreparer):
  1192. reserved_words = RESERVED_WORDS
  1193. def __init__(self, dialect):
  1194. super(MSIdentifierPreparer, self).__init__(dialect, initial_quote='[',
  1195. final_quote=']')
  1196. def _escape_identifier(self, value):
  1197. return value
  1198. def quote_schema(self, schema, force=None):
  1199. """Prepare a quoted table and schema name."""
  1200. result = '.'.join([self.quote(x, force) for x in schema.split('.')])
  1201. return result
def _db_plus_owner_listing(fn):
    """Decorator for schema-listing reflection methods: split the
    ``schema`` argument into (dbname, owner) and invoke ``fn`` with the
    connection switched to that database."""
    def wrap(dialect, connection, schema=None, **kw):
        dbname, owner = _owner_plus_db(dialect, schema)
        return _switch_db(dbname, connection, fn, dialect, connection,
                          dbname, owner, schema, **kw)
    return update_wrapper(wrap, fn)
def _db_plus_owner(fn):
    """Decorator for per-table reflection methods: split the ``schema``
    argument into (dbname, owner) and invoke ``fn`` with the connection
    switched to that database."""
    def wrap(dialect, connection, tablename, schema=None, **kw):
        dbname, owner = _owner_plus_db(dialect, schema)
        return _switch_db(dbname, connection, fn, dialect, connection,
                          tablename, dbname, owner, schema, **kw)
    return update_wrapper(wrap, fn)
  1214. def _switch_db(dbname, connection, fn, *arg, **kw):
  1215. if dbname:
  1216. current_db = connection.scalar("select db_name()")
  1217. connection.execute("use %s" % dbname)
  1218. try:
  1219. return fn(*arg, **kw)
  1220. finally:
  1221. if dbname:
  1222. connection.execute("use %s" % current_db)
  1223. def _owner_plus_db(dialect, schema):
  1224. if not schema:
  1225. return None, dialect.default_schema_name
  1226. elif "." in schema:
  1227. return schema.split(".", 1)
  1228. else:
  1229. return None, schema
class MSDialect(default.DefaultDialect):
    name = 'mssql'
    supports_default_values = True
    supports_empty_insert = False
    execution_ctx_cls = MSExecutionContext
    use_scope_identity = True
    max_identifier_length = 128
    schema_name = "dbo"

    # Python-side wrappers applying bind/result coercion for date and
    # time values (see _MSDateTime, _MSDate, TIME in this module)
    colspecs = {
        sqltypes.DateTime: _MSDateTime,
        sqltypes.Date: _MSDate,
        sqltypes.Time: TIME,
    }

    engine_config_types = default.DefaultDialect.engine_config_types.union([
        ('legacy_schema_aliasing', util.asbool),
    ])

    ischema_names = ischema_names

    supports_native_boolean = False
    supports_unicode_binds = True
    postfetch_lastrowid = True

    server_version_info = ()

    statement_compiler = MSSQLCompiler
    ddl_compiler = MSDDLCompiler
    type_compiler = MSTypeCompiler
    preparer = MSIdentifierPreparer

    # per-object dialect options accepted as mssql_* keyword arguments
    construct_arguments = [
        (sa_schema.PrimaryKeyConstraint, {
            "clustered": None
        }),
        (sa_schema.UniqueConstraint, {
            "clustered": None
        }),
        (sa_schema.Index, {
            "clustered": None,
            "include": None
        })
    ]
  1267. def __init__(self,
  1268. query_timeout=None,
  1269. use_scope_identity=True,
  1270. max_identifier_length=None,
  1271. schema_name="dbo",
  1272. isolation_level=None,
  1273. deprecate_large_types=None,
  1274. legacy_schema_aliasing=False, **opts):
  1275. self.query_timeout = int(query_timeout or 0)
  1276. self.schema_name = schema_name
  1277. self.use_scope_identity = use_scope_identity
  1278. self.max_identifier_length = int(max_identifier_length or 0) or \
  1279. self.max_identifier_length
  1280. self.deprecate_large_types = deprecate_large_types
  1281. self.legacy_schema_aliasing = legacy_schema_aliasing
  1282. super(MSDialect, self).__init__(**opts)
  1283. self.isolation_level = isolation_level
    def do_savepoint(self, connection, name):
        # give the DBAPI a push: SAVE TRANSACTION requires an open
        # transaction, so begin one if @@TRANCOUNT shows none active
        connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION")
        super(MSDialect, self).do_savepoint(connection, name)
    def do_release_savepoint(self, connection, name):
        # SQL Server does not support RELEASE SAVEPOINT
        pass
    # isolation level names accepted by set_isolation_level()
    _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
                             'READ COMMITTED', 'REPEATABLE READ',
                             'SNAPSHOT'])
    def set_isolation_level(self, connection, level):
        """Issue SET TRANSACTION ISOLATION LEVEL on the raw DBAPI
        connection.

        :param level: level name with underscores, e.g.
         ``READ_COMMITTED``; validated against ``_isolation_lookup``.
        :raises exc.ArgumentError: if the level is not recognized.
        """
        level = level.replace('_', ' ')
        if level not in self._isolation_lookup:
            raise exc.ArgumentError(
                "Invalid value '%s' for isolation_level. "
                "Valid isolation levels for %s are %s" %
                (level, self.name, ", ".join(self._isolation_lookup))
            )
        cursor = connection.cursor()
        cursor.execute(
            "SET TRANSACTION ISOLATION LEVEL %s" % level)
        cursor.close()
    def get_isolation_level(self, connection):
        """Return the session's isolation level name, upper-cased, by
        querying sys.dm_exec_sessions.

        :raises NotImplementedError: on servers older than SQL Server
         2005, which lack the DMV used here.
        """
        if self.server_version_info < MS_2005_VERSION:
            raise NotImplementedError(
                "Can't fetch isolation level prior to SQL Server 2005")

        cursor = connection.cursor()
        cursor.execute("""
          SELECT CASE transaction_isolation_level
            WHEN 0 THEN NULL
            WHEN 1 THEN 'READ UNCOMMITTED'
            WHEN 2 THEN 'READ COMMITTED'
            WHEN 3 THEN 'REPEATABLE READ'
            WHEN 4 THEN 'SERIALIZABLE'
            WHEN 5 THEN 'SNAPSHOT' END AS TRANSACTION_ISOLATION_LEVEL
            FROM sys.dm_exec_sessions
            where session_id = @@SPID
          """)
        val = cursor.fetchone()[0]
        cursor.close()
        return val.upper()
    def initialize(self, connection):
        # after the base class determines server_version_info, derive
        # the version-dependent feature flags
        super(MSDialect, self).initialize(connection)
        self._setup_version_attributes()
  1328. def on_connect(self):
  1329. if self.isolation_level is not None:
  1330. def connect(conn):
  1331. self.set_isolation_level(conn, self.isolation_level)
  1332. return connect
  1333. else:
  1334. return None
  1335. def _setup_version_attributes(self):
  1336. if self.server_version_info[0] not in list(range(8, 17)):
  1337. util.warn(
  1338. "Unrecognized server version info '%s'. Some SQL Server "
  1339. "features may not function properly." %
  1340. ".".join(str(x) for x in self.server_version_info))
  1341. if self.server_version_info >= MS_2005_VERSION and \
  1342. 'implicit_returning' not in self.__dict__:
  1343. self.implicit_returning = True
  1344. if self.server_version_info >= MS_2008_VERSION:
  1345. self.supports_multivalues_insert = True
  1346. if self.deprecate_large_types is None:
  1347. self.deprecate_large_types = \
  1348. self.server_version_info >= MS_2012_VERSION
  1349. def _get_default_schema_name(self, connection):
  1350. if self.server_version_info < MS_2005_VERSION:
  1351. return self.schema_name
  1352. else:
  1353. query = sql.text("SELECT schema_name()")
  1354. default_schema_name = connection.scalar(query)
  1355. if default_schema_name is not None:
  1356. return util.text_type(default_schema_name)
  1357. else:
  1358. return self.schema_name
  1359. @_db_plus_owner
  1360. def has_table(self, connection, tablename, dbname, owner, schema):
  1361. columns = ischema.columns
  1362. whereclause = columns.c.table_name == tablename
  1363. if owner:
  1364. whereclause = sql.and_(whereclause,
  1365. columns.c.table_schema == owner)
  1366. s = sql.select([columns], whereclause)
  1367. c = connection.execute(s)
  1368. return c.first() is not None
  1369. @reflection.cache
  1370. def get_schema_names(self, connection, **kw):
  1371. s = sql.select([ischema.schemata.c.schema_name],
  1372. order_by=[ischema.schemata.c.schema_name]
  1373. )
  1374. schema_names = [r[0] for r in connection.execute(s)]
  1375. return schema_names
  1376. @reflection.cache
  1377. @_db_plus_owner_listing
  1378. def get_table_names(self, connection, dbname, owner, schema, **kw):
  1379. tables = ischema.tables
  1380. s = sql.select([tables.c.table_name],
  1381. sql.and_(
  1382. tables.c.table_schema == owner,
  1383. tables.c.table_type == 'BASE TABLE'
  1384. ),
  1385. order_by=[tables.c.table_name]
  1386. )
  1387. table_names = [r[0] for r in connection.execute(s)]
  1388. return table_names
  1389. @reflection.cache
  1390. @_db_plus_owner_listing
  1391. def get_view_names(self, connection, dbname, owner, schema, **kw):
  1392. tables = ischema.tables
  1393. s = sql.select([tables.c.table_name],
  1394. sql.and_(
  1395. tables.c.table_schema == owner,
  1396. tables.c.table_type == 'VIEW'
  1397. ),
  1398. order_by=[tables.c.table_name]
  1399. )
  1400. view_names = [r[0] for r in connection.execute(s)]
  1401. return view_names
    @reflection.cache
    @_db_plus_owner
    def get_indexes(self, connection, tablename, dbname, owner, schema, **kw):
        """Return reflected index records (name, unique flag, column names)
        for ``tablename``, excluding the primary key index.

        Uses the ``sys.indexes`` / ``sys.index_columns`` catalog views, which
        are available only on SQL Server 2005 and later.
        """
        # using system catalogs, don't support index reflection
        # below MS 2005
        if self.server_version_info < MS_2005_VERSION:
            return []

        # First pass: one row per index, keyed by index_id.
        rp = connection.execute(
            sql.text("select ind.index_id, ind.is_unique, ind.name "
                     "from sys.indexes as ind join sys.tables as tab on "
                     "ind.object_id=tab.object_id "
                     "join sys.schemas as sch on sch.schema_id=tab.schema_id "
                     "where tab.name = :tabname "
                     "and sch.name=:schname "
                     "and ind.is_primary_key=0",
                     bindparams=[
                         sql.bindparam('tabname', tablename,
                                       sqltypes.String(convert_unicode=True)),
                         sql.bindparam('schname', owner,
                                       sqltypes.String(convert_unicode=True))
                     ],
                     typemap={
                         'name': sqltypes.Unicode()
                     }
                     )
        )
        indexes = {}
        for row in rp:
            indexes[row['index_id']] = {
                'name': row['name'],
                'unique': row['is_unique'] == 1,
                'column_names': []
            }
        # Second pass: attach member column names to each collected index.
        rp = connection.execute(
            sql.text(
                "select ind_col.index_id, ind_col.object_id, col.name "
                "from sys.columns as col "
                "join sys.tables as tab on tab.object_id=col.object_id "
                "join sys.index_columns as ind_col on "
                "(ind_col.column_id=col.column_id and "
                "ind_col.object_id=tab.object_id) "
                "join sys.schemas as sch on sch.schema_id=tab.schema_id "
                "where tab.name=:tabname "
                "and sch.name=:schname",
                bindparams=[
                    sql.bindparam('tabname', tablename,
                                  sqltypes.String(convert_unicode=True)),
                    sql.bindparam('schname', owner,
                                  sqltypes.String(convert_unicode=True))
                ],
                typemap={'name': sqltypes.Unicode()}
            ),
        )
        for row in rp:
            # Rows for the primary key index were skipped above, so only
            # index_ids already collected are attached.
            if row['index_id'] in indexes:
                indexes[row['index_id']]['column_names'].append(row['name'])
        return list(indexes.values())
  1459. @reflection.cache
  1460. @_db_plus_owner
  1461. def get_view_definition(self, connection, viewname,
  1462. dbname, owner, schema, **kw):
  1463. rp = connection.execute(
  1464. sql.text(
  1465. "select definition from sys.sql_modules as mod, "
  1466. "sys.views as views, "
  1467. "sys.schemas as sch"
  1468. " where "
  1469. "mod.object_id=views.object_id and "
  1470. "views.schema_id=sch.schema_id and "
  1471. "views.name=:viewname and sch.name=:schname",
  1472. bindparams=[
  1473. sql.bindparam('viewname', viewname,
  1474. sqltypes.String(convert_unicode=True)),
  1475. sql.bindparam('schname', owner,
  1476. sqltypes.String(convert_unicode=True))
  1477. ]
  1478. )
  1479. )
  1480. if rp:
  1481. view_def = rp.scalar()
  1482. return view_def
    @reflection.cache
    @_db_plus_owner
    def get_columns(self, connection, tablename, dbname, owner, schema, **kw):
        """Return reflected column records for ``tablename``.

        Builds the base column list from INFORMATION_SCHEMA.COLUMNS, then
        runs ``sp_columns`` to detect the identity column (if any) and
        augments it with autoincrement/sequence information.
        """
        # Get base columns
        columns = ischema.columns
        if owner:
            whereclause = sql.and_(columns.c.table_name == tablename,
                                   columns.c.table_schema == owner)
        else:
            whereclause = columns.c.table_name == tablename
        s = sql.select([columns], whereclause,
                       order_by=[columns.c.ordinal_position])
        c = connection.execute(s)
        cols = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            (name, type, nullable, charlen,
             numericprec, numericscale, default, collation) = (
                row[columns.c.column_name],
                row[columns.c.data_type],
                row[columns.c.is_nullable] == 'YES',
                row[columns.c.character_maximum_length],
                row[columns.c.numeric_precision],
                row[columns.c.numeric_scale],
                row[columns.c.column_default],
                row[columns.c.collation_name]
            )
            coltype = self.ischema_names.get(type, None)

            kwargs = {}
            if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText,
                           MSNText, MSBinary, MSVarBinary,
                           sqltypes.LargeBinary):
                # INFORMATION_SCHEMA reports -1 for "max" length types.
                if charlen == -1:
                    charlen = None
                kwargs['length'] = charlen
                if collation:
                    kwargs['collation'] = collation

            if coltype is None:
                util.warn(
                    "Did not recognize type '%s' of column '%s'" %
                    (type, name))
                coltype = sqltypes.NULLTYPE
            else:
                # REAL has implicit precision; don't pass scale/precision.
                if issubclass(coltype, sqltypes.Numeric) and \
                        coltype is not MSReal:
                    kwargs['scale'] = numericscale
                    kwargs['precision'] = numericprec

                coltype = coltype(**kwargs)
            cdict = {
                'name': name,
                'type': coltype,
                'nullable': nullable,
                'default': default,
                'autoincrement': False,
            }
            cols.append(cdict)
        # autoincrement and identity
        colmap = {}
        for col in cols:
            colmap[col['name']] = col
        # We also run an sp_columns to check for identity columns:
        cursor = connection.execute("sp_columns @table_name = '%s', "
                                    "@table_owner = '%s'"
                                    % (tablename, owner))
        ic = None
        while True:
            row = cursor.fetchone()
            if row is None:
                break
            # sp_columns result: column 3 is COLUMN_NAME, 5 is TYPE_NAME;
            # identity columns report e.g. "int identity".
            (col_name, type_name) = row[3], row[5]
            if type_name.endswith("identity") and col_name in colmap:
                ic = col_name
                colmap[col_name]['autoincrement'] = True
                colmap[col_name]['sequence'] = dict(
                    name='%s_identity' % col_name)
                break
        cursor.close()

        # ident_seed()/ident_incr() require SQL Server 2005+.
        if ic is not None and self.server_version_info >= MS_2005_VERSION:
            table_fullname = "%s.%s" % (owner, tablename)
            cursor = connection.execute(
                "select ident_seed('%s'), ident_incr('%s')"
                % (table_fullname, table_fullname)
            )

            row = cursor.first()
            if row is not None and row[0] is not None:
                colmap[ic]['sequence'].update({
                    'start': int(row[0]),
                    'increment': int(row[1])
                })
        return cols
  1575. @reflection.cache
  1576. @_db_plus_owner
  1577. def get_pk_constraint(self, connection, tablename,
  1578. dbname, owner, schema, **kw):
  1579. pkeys = []
  1580. TC = ischema.constraints
  1581. C = ischema.key_constraints.alias('C')
  1582. # Primary key constraints
  1583. s = sql.select([C.c.column_name,
  1584. TC.c.constraint_type,
  1585. C.c.constraint_name],
  1586. sql.and_(TC.c.constraint_name == C.c.constraint_name,
  1587. TC.c.table_schema == C.c.table_schema,
  1588. C.c.table_name == tablename,
  1589. C.c.table_schema == owner)
  1590. )
  1591. c = connection.execute(s)
  1592. constraint_name = None
  1593. for row in c:
  1594. if 'PRIMARY' in row[TC.c.constraint_type.name]:
  1595. pkeys.append(row[0])
  1596. if constraint_name is None:
  1597. constraint_name = row[C.c.constraint_name.name]
  1598. return {'constrained_columns': pkeys, 'name': constraint_name}
  1599. @reflection.cache
  1600. @_db_plus_owner
  1601. def get_foreign_keys(self, connection, tablename,
  1602. dbname, owner, schema, **kw):
  1603. RR = ischema.ref_constraints
  1604. C = ischema.key_constraints.alias('C')
  1605. R = ischema.key_constraints.alias('R')
  1606. # Foreign key constraints
  1607. s = sql.select([C.c.column_name,
  1608. R.c.table_schema, R.c.table_name, R.c.column_name,
  1609. RR.c.constraint_name, RR.c.match_option,
  1610. RR.c.update_rule,
  1611. RR.c.delete_rule],
  1612. sql.and_(C.c.table_name == tablename,
  1613. C.c.table_schema == owner,
  1614. C.c.constraint_name == RR.c.constraint_name,
  1615. R.c.constraint_name ==
  1616. RR.c.unique_constraint_name,
  1617. C.c.ordinal_position == R.c.ordinal_position
  1618. ),
  1619. order_by=[RR.c.constraint_name, R.c.ordinal_position]
  1620. )
  1621. # group rows by constraint ID, to handle multi-column FKs
  1622. fkeys = []
  1623. fknm, scols, rcols = (None, [], [])
  1624. def fkey_rec():
  1625. return {
  1626. 'name': None,
  1627. 'constrained_columns': [],
  1628. 'referred_schema': None,
  1629. 'referred_table': None,
  1630. 'referred_columns': []
  1631. }
  1632. fkeys = util.defaultdict(fkey_rec)
  1633. for r in connection.execute(s).fetchall():
  1634. scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r
  1635. rec = fkeys[rfknm]
  1636. rec['name'] = rfknm
  1637. if not rec['referred_table']:
  1638. rec['referred_table'] = rtbl
  1639. if schema is not None or owner != rschema:
  1640. if dbname:
  1641. rschema = dbname + "." + rschema
  1642. rec['referred_schema'] = rschema
  1643. local_cols, remote_cols = \
  1644. rec['constrained_columns'],\
  1645. rec['referred_columns']
  1646. local_cols.append(scol)
  1647. remote_cols.append(rcol)
  1648. return list(fkeys.values())